feat: add provider feedback logs
parent 44cc12c0ab
commit 05959e6884
@@ -40,8 +40,10 @@ def _invoke_model(prompt: str, hint: str = "fast") -> str | None:
     errors: list[str] = []
     sentinel_seen = False
 
+    total = len(commands)
     for idx, command in enumerate(commands, start=1):
-        sys.stderr.write(f"[agents] provider {idx}/{len(commands)} → {command.split()[0]}\n")
+        provider_name = command.split()[0]
+        sys.stderr.write(f"[agents] provider {idx}/{total} → {provider_name}\n")
         sys.stderr.flush()
         executor, raw_stdout, stderr, returncode = _run_ai_command(command, prompt, repo_root)
@@ -49,13 +51,25 @@ def _invoke_model(prompt: str, hint: str = "fast") -> str | None:
             stripped = raw_stdout.strip()
             if stripped == model.sentinel:
                 sentinel_seen = True
+                sys.stderr.write(
+                    f"[agents] provider {idx}/{total} → {provider_name} returned sentinel (no change)\n"
+                )
+                sys.stderr.flush()
                 continue
             return stripped
 
         if returncode == 0:
             errors.append(f"{executor!r} produced no output")
+            sys.stderr.write(
+                f"[agents] provider {idx}/{total} → {provider_name} returned no output\n"
+            )
+            sys.stderr.flush()
         else:
             errors.append(f"{executor!r} exited with {returncode}: {stderr or 'no stderr'}")
+            sys.stderr.write(
+                f"[agents] provider {idx}/{total} → {provider_name} exited with {returncode}\n"
+            )
+            sys.stderr.flush()
 
     if sentinel_seen:
         return None
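
Note: the write-then-flush feedback pattern above now repeats in several branches of _invoke_model (and again in call_model below). A minimal sketch of how the repeated lines could be factored into one helper; _log_provider is a hypothetical name and is not part of this commit:

import sys

def _log_provider(context: str, idx: int, total: int, provider_name: str, status: str = "") -> None:
    # Hypothetical helper: writes one "<context> provider i/total → name <status>"
    # feedback line to stderr and flushes so it shows up immediately.
    suffix = f" {status}" if status else ""
    sys.stderr.write(f"{context} provider {idx}/{total} → {provider_name}{suffix}\n")
    sys.stderr.flush()

# Example: the sentinel branch above would collapse to a single call.
# _log_provider("[agents]", idx, total, provider_name, "returned sentinel (no change)")
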
@@ -414,7 +414,13 @@ def build_prompt(
     )
 
 
-def call_model(model: ModelConfig, prompt: str, model_hint: str, cwd: Path) -> tuple[str, bool]:
+def call_model(
+    model: ModelConfig,
+    prompt: str,
+    model_hint: str,
+    cwd: Path,
+    context: str = "[runner]",
+) -> tuple[str, bool]:
     """
     Invokes the AI model command with the given prompt and captures its output.
@@ -435,29 +441,49 @@ def call_model(model: ModelConfig, prompt: str, model_hint: str, cwd: Path) -> t
     # Get commands based on hint
    commands = model.get_commands_for_hint(model_hint)
 
-    iter_commands = enumerate(commands, start=1)
-    for idx, command in iter_commands:
-        sys.stderr.write(f"[runner] provider {idx}/{len(commands)} → {command.split()[0]}\n")
+    total = len(commands)
+    for idx, command in enumerate(commands, start=1):
+        provider_name = command.split()[0]
+        sys.stderr.write(f"{context} provider {idx}/{total} → {provider_name}\n")
         sys.stderr.flush()
         executor, raw_stdout, stderr, returncode = _run_ai_command(command, prompt, cwd)
 
         if raw_stdout:
             stripped = raw_stdout.strip()
             if stripped == model.sentinel:
+                sys.stderr.write(
+                    f"{context} provider {idx}/{total} → {provider_name} returned sentinel (no change)\n"
+                )
+                sys.stderr.flush()
                 return raw_stdout, True
             if "API Error:" in raw_stdout and "Overloaded" in raw_stdout:
                 raise PatchGenerationError("Claude API is overloaded (500 error) - please retry later")
             if "<<<AI_DIFF_START>>>" in raw_stdout:
+                sys.stderr.write(
+                    f"{context} provider {idx}/{total} → {provider_name} produced diff\n"
+                )
+                sys.stderr.flush()
                 return raw_stdout, False
             # Non-empty output without diff markers counts as failure so we can try fallbacks.
             errors.append(f"{executor!r} produced non-diff output: {stripped[:80]}")
+            sys.stderr.write(
+                f"{context} provider {idx}/{total} → {provider_name} non-diff output; trying next\n"
+            )
+            sys.stderr.flush()
             continue
 
         if returncode == 0:
             errors.append(f"{executor!r} produced no output")
+            sys.stderr.write(
+                f"{context} provider {idx}/{total} → {provider_name} returned no output\n"
+            )
+            sys.stderr.flush()
         else:
             errors.append(f"{executor!r} exited with {returncode}: {stderr or 'no stderr'}")
+            sys.stderr.write(
+                f"{context} provider {idx}/{total} → {provider_name} exited with {returncode}\n"
+            )
+            sys.stderr.flush()
 
     raise PatchGenerationError("AI command(s) failed: " + "; ".join(errors))
@@ -105,6 +105,7 @@ def process(repo_root: Path, rules: RulesConfig, model: ModelConfig) -> int:
 
     # 3) Ask the patcher to build a diff with the assembled instruction.
     try:
+        print(f"[runner] generating {output_rel.as_posix()} from {src_rel.as_posix()}")
         generate_output(
             repo_root=repo_root,
             rules=rules,
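
For reference, a sketch of how a caller might use the new context parameter of call_model; the "[agents]" tag mirrors the prefix used in _invoke_model, and the surrounding variables (model, prompt, repo_root) are assumed to exist at the call site rather than taken from this diff:

# Hypothetical call site. The default context stays "[runner]"; an agents-side
# caller could tag its own feedback lines instead.
raw_stdout, hit_sentinel = call_model(
    model,                 # ModelConfig providing get_commands_for_hint() and .sentinel
    prompt,
    model_hint="fast",
    cwd=repo_root,
    context="[agents]",    # prefix for the stderr provider feedback lines
)
if hit_sentinel:
    # The boolean return appears to flag the sentinel ("no change") case.
    print("provider reported no change")
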