From 05959e68842b8699ff71d50f2343771decd9f5e6 Mon Sep 17 00:00:00 2001
From: rob
Date: Sun, 2 Nov 2025 00:40:02 -0300
Subject: [PATCH] feat: add provider feedback logs

---
 automation/agents.py  | 16 +++++++++++++++-
 automation/patcher.py | 36 +++++++++++++++++++++++++++++++-----
 automation/runner.py  |  1 +
 3 files changed, 47 insertions(+), 6 deletions(-)

diff --git a/automation/agents.py b/automation/agents.py
index fa36f9e..46492a9 100644
--- a/automation/agents.py
+++ b/automation/agents.py
@@ -40,8 +40,10 @@ def _invoke_model(prompt: str, hint: str = "fast") -> str | None:
     errors: list[str] = []
     sentinel_seen = False
 
+    total = len(commands)
     for idx, command in enumerate(commands, start=1):
-        sys.stderr.write(f"[agents] provider {idx}/{len(commands)} → {command.split()[0]}\n")
+        provider_name = command.split()[0]
+        sys.stderr.write(f"[agents] provider {idx}/{total} → {provider_name}\n")
         sys.stderr.flush()
 
         executor, raw_stdout, stderr, returncode = _run_ai_command(command, prompt, repo_root)
@@ -49,13 +51,25 @@
             stripped = raw_stdout.strip()
             if stripped == model.sentinel:
                 sentinel_seen = True
+                sys.stderr.write(
+                    f"[agents] provider {idx}/{total} → {provider_name} returned sentinel (no change)\n"
+                )
+                sys.stderr.flush()
                 continue
             return stripped
 
         if returncode == 0:
             errors.append(f"{executor!r} produced no output")
+            sys.stderr.write(
+                f"[agents] provider {idx}/{total} → {provider_name} returned no output\n"
+            )
+            sys.stderr.flush()
         else:
             errors.append(f"{executor!r} exited with {returncode}: {stderr or 'no stderr'}")
+            sys.stderr.write(
+                f"[agents] provider {idx}/{total} → {provider_name} exited with {returncode}\n"
+            )
+            sys.stderr.flush()
 
     if sentinel_seen:
         return None
diff --git a/automation/patcher.py b/automation/patcher.py
index 678f8e9..34145bb 100644
--- a/automation/patcher.py
+++ b/automation/patcher.py
@@ -414,7 +414,13 @@ def build_prompt(
 )
 
 
-def call_model(model: ModelConfig, prompt: str, model_hint: str, cwd: Path) -> tuple[str, bool]:
+def call_model(
+    model: ModelConfig,
+    prompt: str,
+    model_hint: str,
+    cwd: Path,
+    context: str = "[runner]",
+) -> tuple[str, bool]:
     """
     Invokes the AI model command with the given prompt and captures its output.
 
@@ -435,29 +441,49 @@ def call_model(model: ModelConfig, prompt: str, model_hint: str, cwd: Path) -> t
 
     # Get commands based on hint
     commands = model.get_commands_for_hint(model_hint)
-    iter_commands = enumerate(commands, start=1)
-
-    for idx, command in iter_commands:
-        sys.stderr.write(f"[runner] provider {idx}/{len(commands)} → {command.split()[0]}\n")
+    total = len(commands)
+    for idx, command in enumerate(commands, start=1):
+        provider_name = command.split()[0]
+        sys.stderr.write(f"{context} provider {idx}/{total} → {provider_name}\n")
         sys.stderr.flush()
 
         executor, raw_stdout, stderr, returncode = _run_ai_command(command, prompt, cwd)
 
         if raw_stdout:
             stripped = raw_stdout.strip()
             if stripped == model.sentinel:
+                sys.stderr.write(
+                    f"{context} provider {idx}/{total} → {provider_name} returned sentinel (no change)\n"
+                )
+                sys.stderr.flush()
                 return raw_stdout, True
             if "API Error:" in raw_stdout and "Overloaded" in raw_stdout:
                 raise PatchGenerationError("Claude API is overloaded (500 error) - please retry later")
             if "<<>>" in raw_stdout:
+                sys.stderr.write(
+                    f"{context} provider {idx}/{total} → {provider_name} produced diff\n"
+                )
+                sys.stderr.flush()
                 return raw_stdout, False
             # Non-empty output without diff markers counts as failure so we can try fallbacks.
             errors.append(f"{executor!r} produced non-diff output: {stripped[:80]}")
+            sys.stderr.write(
+                f"{context} provider {idx}/{total} → {provider_name} non-diff output; trying next\n"
+            )
+            sys.stderr.flush()
             continue
 
         if returncode == 0:
             errors.append(f"{executor!r} produced no output")
+            sys.stderr.write(
+                f"{context} provider {idx}/{total} → {provider_name} returned no output\n"
+            )
+            sys.stderr.flush()
         else:
             errors.append(f"{executor!r} exited with {returncode}: {stderr or 'no stderr'}")
+            sys.stderr.write(
+                f"{context} provider {idx}/{total} → {provider_name} exited with {returncode}\n"
+            )
+            sys.stderr.flush()
 
     raise PatchGenerationError("AI command(s) failed: " + "; ".join(errors))
diff --git a/automation/runner.py b/automation/runner.py
index 17ad7b4..cce8843 100644
--- a/automation/runner.py
+++ b/automation/runner.py
@@ -105,6 +105,7 @@ def process(repo_root: Path, rules: RulesConfig, model: ModelConfig) -> int:
 
     # 3) Ask the patcher to build a diff with the assembled instruction.
     try:
+        print(f"[runner] generating {output_rel.as_posix()} from {src_rel.as_posix()}")
        generate_output(
            repo_root=repo_root,
            rules=rules,