Add AI review logging and --skip-ai-review flag to fabric sync

- Add comprehensive logging to run_ai_scrutiny_review() function
  - Log tool path checks, warning counts, timing
  - Log timeout events, JSON parse errors, all exceptions
  - Previously failures were silently swallowed
- Add --skip-ai-review flag for large bulk imports
  - Prevents rate limiting during initial imports
  - Passes through daemon_loop, run_sync, sync_pattern, publish_to_registry

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
parent adefe909c2
commit 66fd121ee4
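
As a rough sketch of the plumbing described above, the snippet below shows how a boolean CLI flag like `--skip-ai-review` can be parsed once and handed down through the call chain. The stub bodies and the pattern name are illustrative only, not the actual fabric_sync.py code; the real signatures appear in the diff below.

```python
# Illustrative stubs only; see the fabric_sync.py diff below for the real functions.
import argparse

def publish_to_registry(name: str, skip_ai_review: bool = False) -> None:
    if skip_ai_review:
        print(f"AI review skipped for {name}")  # real code logs this and tags the scrutiny report

def sync_pattern(name: str, skip_ai_review: bool = False) -> None:
    publish_to_registry(name, skip_ai_review=skip_ai_review)

def run_sync(names: list[str], skip_ai_review: bool = False) -> None:
    for name in names:
        sync_pattern(name, skip_ai_review=skip_ai_review)

def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("--skip-ai-review", action="store_true")
    args = parser.parse_args()
    run_sync(["example_pattern"], skip_ai_review=args.skip_ai_review)

if __name__ == "__main__":
    main()
```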
CHANGELOG.md (+10)
@@ -37,6 +37,16 @@ All notable changes to CmdForge will be documented in this file.
 - Rejected version count display
 - Cleanup result modal with detailed output
 
+#### Fabric Sync Improvements
+- **AI review logging**: Added comprehensive logging to `run_ai_scrutiny_review()` function
+  - Logs tool path checks, warning counts, timing, success/failure states
+  - Logs timeout events, JSON parse errors, and all exception types
+  - Previously failures were silently swallowed with `return None`
+- **`--skip-ai-review` flag**: Skip AI secondary review for large bulk imports
+  - Prevents rate limiting and timeouts during initial large imports
+  - Usage: `python fabric_sync.py --sync --skip-ai-review`
+  - Logs when AI review is skipped with reason
+
 ### Fixed
 - VERSION_EXISTS error showing after successful publish (made endpoint idempotent by checking config_hash)
 - My Tools page listing every version separately (now consolidated by tool name)
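
For reference, an initial bulk import would likely be invoked as below; the first command is taken from the changelog entry above, and combining it with `--auto-approve` (also covered by this commit) is optional.

```
python fabric_sync.py --sync --skip-ai-review
python fabric_sync.py --sync --skip-ai-review --auto-approve
```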
fabric_sync.py
@@ -75,6 +75,7 @@ def run_ai_scrutiny_review(scrutiny_report: dict, config: dict, tool_name: str,
     # Check if the AI review tool exists
     tool_path = Path.home() / ".cmdforge" / "scrutiny-ai-review" / "config.yaml"
     if not tool_path.exists():
+        logger.debug(f" AI review skipped for {tool_name}: scrutiny-ai-review tool not found at {tool_path}")
         return None
 
     # Extract warnings from scrutiny report
@@ -84,6 +85,7 @@ def run_ai_scrutiny_review(scrutiny_report: dict, config: dict, tool_name: str,
     ]
 
     if not warnings:
+        logger.debug(f" AI review skipped for {tool_name}: no warnings to review")
         return None
 
     # Prepare tool config for review
@@ -109,9 +111,13 @@ def run_ai_scrutiny_review(scrutiny_report: dict, config: dict, tool_name: str,
             break
 
     if not cmdforge_exe:
+        logger.warning(f" AI review skipped for {tool_name}: cmdforge executable not found")
         return None
 
     # Run the tool (use -- to separate cmdforge args from tool args)
+    logger.debug(f" Running AI review for {tool_name} ({len(warnings)} warnings)...")
+    start_time = time.time()
+
     try:
         result = subprocess.run(
             [
@@ -124,10 +130,31 @@ def run_ai_scrutiny_review(scrutiny_report: dict, config: dict, tool_name: str,
             timeout=60, # 60 second timeout
         )
 
+        elapsed = time.time() - start_time
+
         if result.returncode == 0 and result.stdout.strip():
+            logger.debug(f" AI review completed for {tool_name} in {elapsed:.1f}s")
             return json.loads(result.stdout.strip())
+        else:
+            logger.warning(f" AI review failed for {tool_name}: returncode={result.returncode}, elapsed={elapsed:.1f}s")
+            if result.stderr:
+                logger.warning(f" stderr: {result.stderr[:200]}")
+            return None
 
+    except subprocess.TimeoutExpired:
+        elapsed = time.time() - start_time
+        logger.warning(f" AI review TIMEOUT for {tool_name} after {elapsed:.1f}s")
+        return None
-    except (subprocess.TimeoutExpired, json.JSONDecodeError, FileNotFoundError):
+    except json.JSONDecodeError as e:
+        elapsed = time.time() - start_time
+        logger.warning(f" AI review JSON parse error for {tool_name}: {e}")
+        return None
+    except FileNotFoundError as e:
+        logger.warning(f" AI review failed for {tool_name}: {e}")
+        return None
+    except Exception as e:
+        elapsed = time.time() - start_time
+        logger.warning(f" AI review unexpected error for {tool_name} after {elapsed:.1f}s: {e}")
+        return None
 
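For context on the split handlers above, here is a minimal standalone demonstration (not taken from fabric_sync.py; the child command is a placeholder) of which exception each failure mode raises, which is what makes the distinct log messages possible:

```python
import json
import subprocess
import sys

# A deliberately slow child process stands in for the review tool exceeding its timeout.
try:
    subprocess.run([sys.executable, "-c", "import time; time.sleep(5)"],
                   capture_output=True, text=True, timeout=0.5)
except subprocess.TimeoutExpired as exc:
    print(f"timed out after {exc.timeout}s")

# Malformed tool output surfaces separately as a JSON parse error.
try:
    json.loads("not valid json")
except json.JSONDecodeError as exc:
    print(f"parse error: {exc}")
```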
@@ -319,10 +346,19 @@ def publish_to_registry(
     config_yaml: str,
     readme: str,
     provider: str,
-    auto_approve: bool = False
+    auto_approve: bool = False,
+    skip_ai_review: bool = False
 ) -> tuple[bool, str, dict]:
     """Publish a tool directly to the registry database with vetting.
 
     Args:
         name: Tool name
         config_yaml: Tool configuration as YAML string
         readme: README content
         provider: AI provider name
         auto_approve: If True, auto-approve tools that pass scrutiny
+        skip_ai_review: If True, skip AI secondary review (for large bulk imports)
 
     Returns:
         Tuple of (success, message, scrutiny_report)
     """
@@ -365,9 +401,12 @@ def publish_to_registry(
     except Exception as e:
         logger.warning(f"Scrutiny failed for {name}: {e}")
 
-    # Run AI secondary review if there are warnings
+    # Run AI secondary review if there are warnings (unless skipped)
     scrutiny_decision = scrutiny_report.get("decision", "review")
-    if scrutiny_decision == "review":
+    if scrutiny_decision == "review" and skip_ai_review:
+        logger.info(f" AI review skipped for {name} (--skip-ai-review flag)")
+        scrutiny_report["ai_review_skipped"] = True
+    elif scrutiny_decision == "review":
         try:
             ai_review = run_ai_scrutiny_review(scrutiny_report, config, name, description)
             if ai_review:
@@ -486,7 +525,8 @@ def sync_pattern(
     state: SyncState,
     dry_run: bool = False,
     publish_to_db: bool = True,
-    auto_approve: bool = False
+    auto_approve: bool = False,
+    skip_ai_review: bool = False
 ) -> bool:
     """Sync a single pattern.
 
@@ -498,6 +538,7 @@ def sync_pattern(
         dry_run: If True, don't make changes
         publish_to_db: If True, publish to registry database; if False, create local files
         auto_approve: If True, auto-approve tools that pass scrutiny
+        skip_ai_review: If True, skip AI secondary review (for large bulk imports)
 
     Returns:
         True if successful
@@ -556,7 +597,7 @@ This tool was imported from [Fabric](https://github.com/danielmiessler/fabric) p
 
     if publish_to_db:
         # Publish directly to registry database
-        success, message, report = publish_to_registry(name, config_yaml, readme, provider, auto_approve)
+        success, message, report = publish_to_registry(name, config_yaml, readme, provider, auto_approve, skip_ai_review)
 
         if success:
             logger.info(f" ✓ {name} -> registry ({message})")
@@ -645,7 +686,8 @@ def run_sync(
     dry_run: bool = False,
     force_patterns: list[str] = None,
     publish_to_db: bool = True,
-    auto_approve: bool = False
+    auto_approve: bool = False,
+    skip_ai_review: bool = False
 ) -> dict:
     """Run the sync process.
 
@@ -658,6 +700,7 @@ def run_sync(
         force_patterns: List of pattern names to force resync
         publish_to_db: If True, publish to registry database
         auto_approve: If True, auto-approve tools that pass scrutiny
+        skip_ai_review: If True, skip AI secondary review (for large bulk imports)
 
     Returns:
         Summary dict with counts
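Based on the signature and docstring above, a programmatic bulk import could look roughly like this; the module import, paths, and provider value are placeholders rather than defaults defined by the project:

```python
from pathlib import Path

from fabric_sync import run_sync  # module name assumed from the usage line in the changelog

summary = run_sync(
    Path.home() / "fabric" / "patterns",                    # sync_dir (placeholder)
    Path.home() / ".cmdforge" / "fabric-tools",              # output_dir (placeholder)
    Path.home() / ".cmdforge" / "fabric_sync_state.json",    # state_file (placeholder)
    "anthropic",                                             # provider (placeholder)
    publish_to_db=True,
    auto_approve=False,
    skip_ai_review=True,  # skip the AI secondary review during the initial large import
)
if summary["failed"] > 0:
    print(f"{summary['failed']} patterns failed to sync")
```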
@@ -696,7 +739,7 @@ def run_sync(
     logger.info(f"\nSyncing {len(to_sync)} patterns to {dest}...")
     for name in to_sync:
         pattern_dir = patterns_dir / name
-        if sync_pattern(pattern_dir, output_dir, provider, state, dry_run, publish_to_db, auto_approve):
+        if sync_pattern(pattern_dir, output_dir, provider, state, dry_run, publish_to_db, auto_approve, skip_ai_review):
             synced += 1
         else:
             failed += 1
@@ -768,7 +811,8 @@ def daemon_loop(
     provider: str,
     interval: int,
     publish_to_db: bool = True,
-    auto_approve: bool = False
+    auto_approve: bool = False,
+    skip_ai_review: bool = False
 ):
     """Run sync in a loop."""
     logger.info(f"Starting daemon mode with {interval}s interval")
@@ -776,7 +820,8 @@ def daemon_loop(
     while True:
         try:
             run_sync(sync_dir, output_dir, state_file, provider,
-                     publish_to_db=publish_to_db, auto_approve=auto_approve)
+                     publish_to_db=publish_to_db, auto_approve=auto_approve,
+                     skip_ai_review=skip_ai_review)
         except Exception as e:
             logger.error(f"Sync failed: {e}")
 
@@ -855,6 +900,11 @@ def main():
         action="store_true",
         help="Auto-approve tools that pass scrutiny (skip moderation queue)"
     )
+    parser.add_argument(
+        "--skip-ai-review",
+        action="store_true",
+        help="Skip AI secondary review (for large bulk imports to avoid rate limiting)"
+    )
 
     args = parser.parse_args()
 
@@ -873,7 +923,8 @@ def main():
             args.provider,
             args.interval,
             publish_to_db=not args.local_only,
-            auto_approve=args.auto_approve
+            auto_approve=args.auto_approve,
+            skip_ai_review=args.skip_ai_review
         )
         return 0
 
@@ -886,7 +937,8 @@ def main():
         dry_run=args.dry_run,
         force_patterns=args.force,
         publish_to_db=not args.local_only,
-        auto_approve=args.auto_approve
+        auto_approve=args.auto_approve,
+        skip_ai_review=args.skip_ai_review
     )
 
     if summary["failed"] > 0: