feat: add provider feedback logs
commit 05959e6884 (parent 44cc12c0ab)
@@ -40,8 +40,10 @@ def _invoke_model(prompt: str, hint: str = "fast") -> str | None:
     errors: list[str] = []
+    sentinel_seen = False
 
+    total = len(commands)
     for idx, command in enumerate(commands, start=1):
-        sys.stderr.write(f"[agents] provider {idx}/{len(commands)} → {command.split()[0]}\n")
+        provider_name = command.split()[0]
+        sys.stderr.write(f"[agents] provider {idx}/{total} → {provider_name}\n")
         sys.stderr.flush()
         executor, raw_stdout, stderr, returncode = _run_ai_command(command, prompt, repo_root)
@@ -49,13 +51,25 @@ def _invoke_model(prompt: str, hint: str = "fast") -> str | None:
             stripped = raw_stdout.strip()
+            if stripped == model.sentinel:
+                sentinel_seen = True
+                sys.stderr.write(
+                    f"[agents] provider {idx}/{total} → {provider_name} returned sentinel (no change)\n"
+                )
+                sys.stderr.flush()
+                continue
             return stripped
 
         if returncode == 0:
             errors.append(f"{executor!r} produced no output")
+            sys.stderr.write(
+                f"[agents] provider {idx}/{total} → {provider_name} returned no output\n"
+            )
+            sys.stderr.flush()
         else:
             errors.append(f"{executor!r} exited with {returncode}: {stderr or 'no stderr'}")
+            sys.stderr.write(
+                f"[agents] provider {idx}/{total} → {provider_name} exited with {returncode}\n"
+            )
+            sys.stderr.flush()
+
+    if sentinel_seen:
+        return None
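For readers skimming the hunk, a condensed, self-contained sketch of the control flow _invoke_model ends up with: try each provider in order, remember a sentinel answer instead of acting on it immediately, and only report "no change" once every provider has been tried. The function name and the NO_CHANGES value here are illustrative stand-ins; the real sentinel comes from model.sentinel.

import sys

SENTINEL = "NO_CHANGES"  # illustrative; the real value is model.sentinel

def first_useful_output(outputs: list[str]) -> str | None:
    """Condensed shape of the loop above: try providers in order, remember
    any sentinel answer, and treat "no change" as the result only after
    every provider has had its turn."""
    errors: list[str] = []
    sentinel_seen = False
    total = len(outputs)
    for idx, raw in enumerate(outputs, start=1):
        stripped = raw.strip()
        if stripped == SENTINEL:
            sentinel_seen = True
            sys.stderr.write(f"[agents] provider {idx}/{total} returned sentinel (no change)\n")
            sys.stderr.flush()
            continue
        if stripped:
            return stripped
        errors.append(f"provider {idx} produced no output")
    if sentinel_seen:
        return None  # a deliberate "no change", distinct from failure
    raise RuntimeError("all providers failed: " + "; ".join(errors))

# The sentinel answer no longer short-circuits: provider 3 still gets a turn.
print(first_useful_output(["", "NO_CHANGES", "a real answer"]))  # -> "a real answer"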
@@ -414,7 +414,13 @@ def build_prompt(
     )
 
 
-def call_model(model: ModelConfig, prompt: str, model_hint: str, cwd: Path) -> tuple[str, bool]:
+def call_model(
+    model: ModelConfig,
+    prompt: str,
+    model_hint: str,
+    cwd: Path,
+    context: str = "[runner]",
+) -> tuple[str, bool]:
     """
     Invokes the AI model command with the given prompt and captures its output.
 
@@ -435,29 +441,49 @@ def call_model(model: ModelConfig, prompt: str, model_hint: str, cwd: Path) -> tuple[str, bool]:
     # Get commands based on hint
     commands = model.get_commands_for_hint(model_hint)
 
-    iter_commands = enumerate(commands, start=1)
-
-    for idx, command in iter_commands:
-        sys.stderr.write(f"[runner] provider {idx}/{len(commands)} → {command.split()[0]}\n")
+    total = len(commands)
+    for idx, command in enumerate(commands, start=1):
+        provider_name = command.split()[0]
+        sys.stderr.write(f"{context} provider {idx}/{total} → {provider_name}\n")
+        sys.stderr.flush()
         executor, raw_stdout, stderr, returncode = _run_ai_command(command, prompt, cwd)
 
         if raw_stdout:
             stripped = raw_stdout.strip()
             if stripped == model.sentinel:
+                sys.stderr.write(
+                    f"{context} provider {idx}/{total} → {provider_name} returned sentinel (no change)\n"
+                )
+                sys.stderr.flush()
                 return raw_stdout, True
             if "API Error:" in raw_stdout and "Overloaded" in raw_stdout:
                 raise PatchGenerationError("Claude API is overloaded (500 error) - please retry later")
             if "<<<AI_DIFF_START>>>" in raw_stdout:
+                sys.stderr.write(
+                    f"{context} provider {idx}/{total} → {provider_name} produced diff\n"
+                )
+                sys.stderr.flush()
                 return raw_stdout, False
             # Non-empty output without diff markers counts as failure so we can try fallbacks.
             errors.append(f"{executor!r} produced non-diff output: {stripped[:80]}")
+            sys.stderr.write(
+                f"{context} provider {idx}/{total} → {provider_name} non-diff output; trying next\n"
+            )
+            sys.stderr.flush()
             continue
 
         if returncode == 0:
             errors.append(f"{executor!r} produced no output")
+            sys.stderr.write(
+                f"{context} provider {idx}/{total} → {provider_name} returned no output\n"
+            )
+            sys.stderr.flush()
         else:
             errors.append(f"{executor!r} exited with {returncode}: {stderr or 'no stderr'}")
+            sys.stderr.write(
+                f"{context} provider {idx}/{total} → {provider_name} exited with {returncode}\n"
+            )
+            sys.stderr.flush()
 
     raise PatchGenerationError("AI command(s) failed: " + "; ".join(errors))
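Every branch above repeats the same write-then-flush pair with the new context prefix threaded in. A minimal standalone sketch of that pattern, with a hypothetical helper name and example values (the real code inlines the writes rather than using a helper):

import sys

def log_provider_event(context: str, idx: int, total: int, provider_name: str, event: str) -> None:
    # Mirror of the repeated pattern above: prefix every message with the
    # caller-supplied context tag and flush immediately, so progress stays
    # visible even when stderr is block-buffered (e.g. redirected to a file).
    sys.stderr.write(f"{context} provider {idx}/{total} → {provider_name} {event}\n")
    sys.stderr.flush()

# The "[runner]" default keeps call_model's existing log format unchanged;
# other entry points, like the "[agents]" path in the earlier hunks, pass
# their own tag.
log_provider_event("[runner]", 1, 2, "claude", "produced diff")
log_provider_event("[agents]", 2, 2, "codex", "returned sentinel (no change)")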
@@ -105,6 +105,7 @@ def process(repo_root: Path, rules: RulesConfig, model: ModelConfig) -> int:
 
     # 3) Ask the patcher to build a diff with the assembled instruction.
     try:
+        print(f"[runner] generating {output_rel.as_posix()} from {src_rel.as_posix()}")
         generate_output(
             repo_root=repo_root,
             rules=rules,
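For reference, the shape of the line this print produces, with hypothetical paths standing in for the real output_rel/src_rel values (in process() they come from the rules configuration):

from pathlib import Path

# Hypothetical values; in process() these are derived from the rules config.
output_rel = Path("docs/summary.md")
src_rel = Path("docs/notes.md")

# as_posix() keeps forward slashes in the log line on every platform.
print(f"[runner] generating {output_rel.as_posix()} from {src_rel.as_posix()}")
# -> [runner] generating docs/summary.md from docs/notes.md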