Compare commits

2 Commits: 8e6b03cade ... 5e01a335c0

| Author | SHA1 | Date |
|---|---|---|
| | 5e01a335c0 | |
| | 78025aac8e | |
@@ -241,85 +241,311 @@ def vet_pattern(pattern_dir: Path, provider: str = DEFAULT_PROVIDER) -> tuple[bo
        return False, f"Vetting error: {e}"


def publish_to_registry(
    name: str,
    config_yaml: str,
    readme: str,
    provider: str,
    auto_approve: bool = False
) -> tuple[bool, str, dict]:
    """Publish a tool directly to the registry database with vetting.

    Returns:
        Tuple of (success, message, scrutiny_report)
    """
    try:
        # Add src to path for registry imports
        src_dir = Path(__file__).parent.parent / "src"
        if str(src_dir) not in sys.path:
            sys.path.insert(0, str(src_dir))

        from cmdforge.registry.db import connect_db, query_one
        from cmdforge.registry.scrutiny import scrutinize_tool
        from cmdforge.hash_utils import compute_yaml_hash

        # Parse config
        config = yaml.safe_load(config_yaml)
        version = config.get("version", "1.0.0")
        description = config.get("description", "")
        category = config.get("category")
        tags = config.get("tags", [])

        conn = connect_db()

        # Check if already exists
        existing = query_one(
            conn,
            "SELECT id, version FROM tools WHERE owner = ? AND name = ?",
            ["official", name],
        )

        if existing:
            # Check if same version
            if existing["version"] == version:
                conn.close()
                return True, "Already exists (same version)", {}

        # Run scrutiny
        scrutiny_report = {}
        try:
            scrutiny_report = scrutinize_tool(config_yaml, description, readme)
        except Exception as e:
            logger.warning(f"Scrutiny failed for {name}: {e}")

        # Check scrutiny decision
        scrutiny_decision = scrutiny_report.get("decision", "review")
        if scrutiny_decision == "reject":
            fail_findings = [f for f in scrutiny_report.get("findings", []) if f.get("result") == "fail"]
            fail_msg = fail_findings[0]["message"] if fail_findings else "quality too low"
            conn.close()
            return False, f"Rejected by scrutiny: {fail_msg}", scrutiny_report

        # Determine statuses
        if scrutiny_decision == "approve":
            scrutiny_status = "approved"
        elif scrutiny_decision == "review":
            scrutiny_status = "pending_review"
        else:
            scrutiny_status = "pending"

        # Moderation status based on auto_approve setting
        if auto_approve and scrutiny_status == "approved":
            moderation_status = "approved"
        else:
            moderation_status = "pending"

        # Compute hash
        config_hash = compute_yaml_hash(config_yaml)

        # Source attribution
        source_json = json.dumps({
            "type": "imported",
            "original_tool": f"fabric/patterns/{name}",
            "url": "https://github.com/danielmiessler/fabric",
            "license": "MIT",
            "author": "Daniel Miessler"
        })

        tags_json = json.dumps(tags) if tags else "[]"
        scrutiny_json = json.dumps(scrutiny_report) if scrutiny_report else None

        # Ensure official publisher exists
        publisher = query_one(conn, "SELECT id FROM publishers WHERE slug = ?", ["official"])
        if not publisher:
            conn.execute(
                "INSERT INTO publishers (email, password_hash, slug, display_name, verified) VALUES (?, ?, ?, ?, ?)",
                ["official@cmdforge.local", "", "official", "Official", True]
            )
            publisher_id = conn.execute("SELECT last_insert_rowid()").fetchone()[0]
        else:
            publisher_id = publisher["id"]

        if existing:
            # Update existing tool
            conn.execute(
                """
                UPDATE tools SET
                    version = ?, description = ?, category = ?, tags = ?,
                    config_yaml = ?, readme = ?, scrutiny_status = ?, scrutiny_report = ?,
                    source_json = ?, config_hash = ?, moderation_status = ?,
                    published_at = ?
                WHERE id = ?
                """,
                [
                    version, description, category, tags_json,
                    config_yaml, readme, scrutiny_status, scrutiny_json,
                    source_json, config_hash, moderation_status,
                    datetime.now(timezone.utc).isoformat(),
                    existing["id"]
                ]
            )
        else:
            # Insert new tool
            conn.execute(
                """
                INSERT INTO tools (
                    owner, name, version, description, category, tags, config_yaml, readme,
                    publisher_id, scrutiny_status, scrutiny_report, source_json,
                    config_hash, visibility, moderation_status, published_at, downloads
                ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                """,
                [
                    "official", name, version, description, category, tags_json,
                    config_yaml, readme, publisher_id, scrutiny_status, scrutiny_json,
                    source_json, config_hash, "public", moderation_status,
                    datetime.now(timezone.utc).isoformat(), 0
                ]
            )

        conn.commit()
        conn.close()

        decision = scrutiny_report.get("decision", "unknown")
        return True, f"Published (scrutiny: {decision}, moderation: {moderation_status})", scrutiny_report

    except Exception as e:
        logger.error(f"Registry publish failed: {e}")
        import traceback
        traceback.print_exc()
        return False, str(e), {}


def sync_pattern(
    pattern_dir: Path,
    output_dir: Path,
    provider: str,
    state: SyncState,
    dry_run: bool = False
    dry_run: bool = False,
    publish_to_db: bool = True,
    auto_approve: bool = False
) -> bool:
    """Sync a single pattern.

    Args:
        pattern_dir: Path to the pattern directory
        output_dir: Output directory for local tools (if not publishing to DB)
        provider: AI provider name
        state: Sync state object
        dry_run: If True, don't make changes
        publish_to_db: If True, publish to registry database; if False, create local files
        auto_approve: If True, auto-approve tools that pass scrutiny

    Returns:
        True if successful
    """
    name = pattern_dir.name
    pattern_hash = hash_pattern(pattern_dir)

    # Vet the pattern
    passed, reason = vet_pattern(pattern_dir, provider)

    if not passed:
        logger.warning(f" ✗ {name}: {reason}")
        state.patterns[name] = {
            "name": name,
            "hash": pattern_hash,
            "status": "failed",
            "reason": reason,
            "synced_at": datetime.now(timezone.utc).isoformat(),
        }
    # Read pattern content
    system_md = pattern_dir / "system.md"
    if not system_md.exists():
        logger.warning(f" ✗ {name}: No system.md found")
        return False

    if dry_run:
        logger.info(f" [DRY RUN] Would sync: {name} ({reason})")
        return True
    system_prompt = system_md.read_text()

    # Import the pattern
    try:
        script_dir = Path(__file__).parent
        # Import helper functions
        script_dir = Path(__file__).parent
        if str(script_dir) not in sys.path:
            sys.path.insert(0, str(script_dir))

        from import_fabric import import_pattern
        from import_fabric import create_tool_config, pattern_to_display_name

        success = import_pattern(
            name,
            pattern_dir.parent,
            output_dir,
            provider,
            dry_run=False,
            registry_format=False,
        )
        # Create tool config
        config = create_tool_config(name, system_prompt, provider)
        config_yaml = yaml.dump(config, default_flow_style=False, sort_keys=False)

        # Create README with usage and attribution
        display_name = pattern_to_display_name(name)
        readme = f"""# {display_name}

{config.get('description', '')}

## Usage

```bash
cat input.txt | {name}
```

## Source

This tool was imported from [Fabric](https://github.com/danielmiessler/fabric) patterns.

- **Original pattern**: `{name}`
- **Author**: Daniel Miessler
- **License**: MIT
"""

        if dry_run:
            # Quick vet check for dry run
            passed, reason = vet_pattern(pattern_dir, provider)
            if passed:
                logger.info(f" [DRY RUN] Would sync: {name} ({reason})")
            else:
                logger.info(f" [DRY RUN] Would skip: {name} ({reason})")
            return passed

        if publish_to_db:
            # Publish directly to registry database
            success, message, report = publish_to_registry(name, config_yaml, readme, provider, auto_approve)

            if success:
                logger.info(f" ✓ {name}: {reason}")
                logger.info(f" ✓ {name} -> registry ({message})")
                state.patterns[name] = {
                    "name": name,
                    "hash": pattern_hash,
                    "status": "synced",
                    "message": message,
                    "synced_at": datetime.now(timezone.utc).isoformat(),
                }
                return True
            else:
                logger.error(f" ✗ {name}: Import failed")
                logger.warning(f" ✗ {name}: {message}")
                state.patterns[name] = {
                    "name": name,
                    "hash": pattern_hash,
                    "status": "failed",
                    "reason": "Import failed",
                    "reason": message,
                    "synced_at": datetime.now(timezone.utc).isoformat(),
                }
                return False
        else:
            # Original behavior: create local files
            passed, reason = vet_pattern(pattern_dir, provider)

            if not passed:
                logger.warning(f" ✗ {name}: {reason}")
                state.patterns[name] = {
                    "name": name,
                    "hash": pattern_hash,
                    "status": "failed",
                    "reason": reason,
                    "synced_at": datetime.now(timezone.utc).isoformat(),
                }
                return False

    except Exception as e:
        logger.error(f" ✗ {name}: {e}")
        state.patterns[name] = {
            "name": name,
            "hash": pattern_hash,
            "status": "failed",
            "reason": str(e),
            "synced_at": datetime.now(timezone.utc).isoformat(),
        }
        return False
    try:
        from import_fabric import import_pattern

        success = import_pattern(
            name,
            pattern_dir.parent,
            output_dir,
            provider,
            dry_run=False,
            registry_format=False,
        )

        if success:
            logger.info(f" ✓ {name} -> {output_dir}/{name}")
            state.patterns[name] = {
                "name": name,
                "hash": pattern_hash,
                "status": "synced",
                "synced_at": datetime.now(timezone.utc).isoformat(),
            }
            return True
        else:
            logger.error(f" ✗ {name}: Import failed")
            state.patterns[name] = {
                "name": name,
                "hash": pattern_hash,
                "status": "failed",
                "reason": "Import failed",
                "synced_at": datetime.now(timezone.utc).isoformat(),
            }
            return False

    except Exception as e:
        logger.error(f" ✗ {name}: {e}")
        state.patterns[name] = {
            "name": name,
            "hash": pattern_hash,
            "status": "failed",
            "reason": str(e),
            "synced_at": datetime.now(timezone.utc).isoformat(),
        }
        return False


def run_sync(
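A minimal sketch of driving the `publish_to_registry` helper shown above on its own; the pattern name, YAML, README text, and provider id are placeholders, not values from this change.

```python
# Hedged usage sketch of publish_to_registry (all literal values are hypothetical).
ok, message, report = publish_to_registry(
    name="example_pattern",
    config_yaml="name: example_pattern\nversion: 1.0.0\n",
    readme="# Example Pattern\n\n## Usage\n...",
    provider="openai",          # assumed provider id
    auto_approve=False,
)
if ok:
    print(message)              # e.g. "Published (scrutiny: approve, moderation: pending)"
else:
    print("publish failed:", message, report.get("decision"))
```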
@@ -328,10 +554,22 @@ def run_sync(
    state_file: Path,
    provider: str,
    dry_run: bool = False,
    force_patterns: list[str] = None
    force_patterns: list[str] = None,
    publish_to_db: bool = True,
    auto_approve: bool = False
) -> dict:
    """Run the sync process.

    Args:
        sync_dir: Directory for sync data (Fabric clone)
        output_dir: Output directory for local tools (if not publishing to DB)
        state_file: Path to state file
        provider: AI provider name
        dry_run: If True, don't make changes
        force_patterns: List of pattern names to force resync
        publish_to_db: If True, publish to registry database
        auto_approve: If True, auto-approve tools that pass scrutiny

    Returns:
        Summary dict with counts
    """
@@ -365,10 +603,11 @@ def run_sync(
    failed = 0

    if to_sync:
        logger.info(f"\nSyncing {len(to_sync)} patterns...")
        dest = "registry database" if publish_to_db else output_dir
        logger.info(f"\nSyncing {len(to_sync)} patterns to {dest}...")
        for name in to_sync:
            pattern_dir = patterns_dir / name
            if sync_pattern(pattern_dir, output_dir, provider, state, dry_run):
            if sync_pattern(pattern_dir, output_dir, provider, state, dry_run, publish_to_db, auto_approve):
                synced += 1
            else:
                failed += 1
@@ -438,14 +677,17 @@ def daemon_loop(
    output_dir: Path,
    state_file: Path,
    provider: str,
    interval: int
    interval: int,
    publish_to_db: bool = True,
    auto_approve: bool = False
):
    """Run sync in a loop."""
    logger.info(f"Starting daemon mode with {interval}s interval")

    while True:
        try:
            run_sync(sync_dir, output_dir, state_file, provider)
            run_sync(sync_dir, output_dir, state_file, provider,
                     publish_to_db=publish_to_db, auto_approve=auto_approve)
        except Exception as e:
            logger.error(f"Sync failed: {e}")
@@ -514,6 +756,16 @@ def main():
        default=DEFAULT_PROVIDER,
        help=f"Default provider for tools (default: {DEFAULT_PROVIDER})"
    )
    parser.add_argument(
        "--local-only",
        action="store_true",
        help="Create local tools only (don't publish to registry database)"
    )
    parser.add_argument(
        "--auto-approve",
        action="store_true",
        help="Auto-approve tools that pass scrutiny (skip moderation queue)"
    )

    args = parser.parse_args()
@@ -530,7 +782,9 @@ def main():
            args.output,
            state_file,
            args.provider,
            args.interval
            args.interval,
            publish_to_db=not args.local_only,
            auto_approve=args.auto_approve
        )
        return 0
@@ -541,7 +795,9 @@ def main():
        state_file,
        args.provider,
        dry_run=args.dry_run,
        force_patterns=args.force
        force_patterns=args.force,
        publish_to_db=not args.local_only,
        auto_approve=args.auto_approve
    )

    if summary["failed"] > 0:
@@ -91,6 +91,85 @@ def clean_prompt(prompt: str) -> str:
    return prompt


def extract_description(system_prompt: str, pattern_name: str, max_length: int = 200) -> str:
    """Extract a meaningful description from the system prompt.

    Looks for common patterns in Fabric prompts:
    1. "# IDENTITY and PURPOSE" section - most common
    2. "# IDENTITY" section
    3. First paragraph after any title
    4. First sentence of the prompt
    """
    lines = system_prompt.strip().split('\n')
    display_name = pattern_to_display_name(pattern_name)

    # Look for IDENTITY and PURPOSE section
    in_identity = False
    identity_lines = []

    for i, line in enumerate(lines):
        line_lower = line.lower().strip()

        # Check for identity section header
        if 'identity' in line_lower and ('purpose' in line_lower or line_lower.startswith('#')):
            in_identity = True
            continue

        # End of identity section (next header)
        if in_identity and line.strip().startswith('#'):
            break

        # Skip filler lines in identity section
        if in_identity:
            stripped = line.strip()
            if stripped and not stripped.lower().startswith((
                'take a deep breath',
                'think step by step',
                'follow these steps',
                'use the following'
            )):
                identity_lines.append(stripped)

    if identity_lines:
        # Use only the first meaningful line(s) that describe the identity
        desc = identity_lines[0]
        # Remove common filler phrases
        desc = re.sub(r'^You are (an? )?', '', desc, flags=re.IGNORECASE)
        desc = re.sub(r'^You\'re (an? )?', '', desc, flags=re.IGNORECASE)
        desc = desc.strip()
        if desc:
            # Capitalize first letter
            desc = desc[0].upper() + desc[1:] if len(desc) > 1 else desc.upper()
            # Truncate if needed
            if len(desc) > max_length:
                desc = desc[:max_length-3].rsplit(' ', 1)[0] + '...'
            return desc

    # Fallback: Look for first meaningful paragraph
    for line in lines:
        line = line.strip()
        # Skip headers, empty lines, and short lines
        if line.startswith('#') or not line or len(line) < 20:
            continue
        # Skip common instruction patterns and metadata
        line_lower = line.lower()
        if line_lower.startswith((
            'take a deep breath', 'think step by step', 'input:',
            'title:', 'introduction'
        )):
            continue
        # Found a content line - use it
        desc = line
        # Remove "Title:" prefix if present
        desc = re.sub(r'^Title:\s*', '', desc, flags=re.IGNORECASE)
        if len(desc) > max_length:
            desc = desc[:max_length-3].rsplit(' ', 1)[0] + '...'
        return desc

    # Last fallback - generic description
    return f"{display_name} - AI-powered tool from Fabric patterns"


def create_tool_config(
    pattern_name: str,
    system_prompt: str,
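A short sketch of what `extract_description` does with a typical Fabric-style prompt; the prompt text and pattern name below are invented for illustration, not taken from an actual pattern.

```python
# Hedged example input for extract_description (hypothetical prompt and pattern name).
sample_prompt = """# IDENTITY and PURPOSE

You are an expert summarizer of long technical documents.

Take a deep breath and think step by step.

# STEPS

- Read the input carefully.
"""

print(extract_description(sample_prompt, "summarize_docs"))
# -> "Expert summarizer of long technical documents."
#    (the "You are an " prefix is stripped, the first letter capitalized,
#     and the "take a deep breath" filler line is skipped)
```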
@@ -101,15 +180,17 @@ def create_tool_config(
    cleaned_prompt = clean_prompt(system_prompt)
    display_name = pattern_to_display_name(pattern_name)
    category = get_category(pattern_name)
    description = extract_description(system_prompt, pattern_name)

    # Build the full prompt with input placeholder
    full_prompt = f"{cleaned_prompt}\n\n{{input}}"

    config = {
        "name": pattern_name,
        "description": f"{display_name} - imported from Fabric patterns",
        "version": "1.0.0",
        "description": description,
        "version": "1.0.2",
        "category": category,
        "tags": ["fabric", "imported"],

        # Attribution - marks this as an imported tool
        "source": {
@@ -13,7 +13,7 @@ from typing import List

import yaml

TOOL_NAME_RE = re.compile(r"^[A-Za-z0-9-]{1,64}$")
TOOL_NAME_RE = re.compile(r"^[A-Za-z0-9_-]{1,64}$")
SEMVER_RE = re.compile(r"^(\d+)\.(\d+)\.(\d+)(?:-[0-9A-Za-z.-]+)?(?:\+.+)?$")

REQUIRED_README_SECTIONS = ["## Usage", "## Examples"]
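A quick check of the relaxed tool-name rule: the only difference is that underscores are now allowed, which matters for Fabric-style pattern names. The sample name is a placeholder.

```python
import re

OLD_TOOL_NAME_RE = re.compile(r"^[A-Za-z0-9-]{1,64}$")   # before this change
NEW_TOOL_NAME_RE = re.compile(r"^[A-Za-z0-9_-]{1,64}$")  # after this change

name = "summarize_paper"  # hypothetical underscore-style pattern name
print(bool(OLD_TOOL_NAME_RE.match(name)))  # False - rejected by the old pattern
print(bool(NEW_TOOL_NAME_RE.match(name)))  # True  - accepted by the new pattern
```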
@@ -1,19 +1,58 @@
"""Tools page - main view for managing tools."""

from collections import defaultdict
from pathlib import Path
from typing import Optional, Tuple

import yaml

from PySide6.QtWidgets import (
    QWidget, QVBoxLayout, QHBoxLayout, QSplitter,
    QTreeWidget, QTreeWidgetItem, QTextEdit, QLabel,
    QPushButton, QGroupBox, QMessageBox, QFrame
)
from PySide6.QtCore import Qt
from PySide6.QtGui import QFont
from PySide6.QtGui import QFont, QColor, QBrush

from ...tool import (
    Tool, ToolArgument, PromptStep, CodeStep, ToolStep,
    list_tools, load_tool, delete_tool, DEFAULT_CATEGORIES
    list_tools, load_tool, delete_tool, DEFAULT_CATEGORIES,
    get_tools_dir
)
from ...config import load_config
from ...hash_utils import compute_config_hash


def get_tool_publish_state(tool_name: str) -> Tuple[str, Optional[str]]:
    """
    Get the publish state of a tool.

    Returns:
        Tuple of (state, registry_hash) where state is:
        - "published" - has registry_hash and current hash matches
        - "modified" - has registry_hash but current hash differs
        - "local" - no registry_hash (never published/downloaded)
    """
    config_path = get_tools_dir() / tool_name / "config.yaml"
    if not config_path.exists():
        return ("local", None)

    try:
        config = yaml.safe_load(config_path.read_text())
        registry_hash = config.get("registry_hash")

        if not registry_hash:
            return ("local", None)

        # Compute current hash (excluding hash fields)
        current_hash = compute_config_hash(config)

        if current_hash == registry_hash:
            return ("published", registry_hash)
        else:
            return ("modified", registry_hash)
    except Exception:
        return ("local", None)


class ToolsPage(QWidget):
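A minimal sketch of branching on the publish state returned by `get_tool_publish_state`, pairing it with `short_hash` from the new hash module; the tool name and the `cmdforge.hash_utils` import path are assumptions for illustration.

```python
# Hedged usage sketch; "my-tool" is a placeholder name.
from cmdforge.hash_utils import short_hash  # assumed import path for the new module

state, registry_hash = get_tool_publish_state("my-tool")
if state == "published":
    print(f"Up to date with registry ({short_hash(registry_hash)})")
elif state == "modified":
    print("Local edits not yet republished")
else:
    print("Local-only tool - never published or downloaded")
```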
@@ -169,10 +208,34 @@ class ToolsPage(QWidget):

            # Tools in category
            for name, tool in sorted(tools_by_category[category], key=lambda x: x[0]):
                tool_item = QTreeWidgetItem([name])
                # Get publish state
                state, registry_hash = get_tool_publish_state(name)

                # Show state indicator in display name
                if state == "published":
                    display_name = f"{name} ✓"
                    tooltip = "Published to registry - up to date"
                    color = QColor(56, 161, 105)  # Green
                elif state == "modified":
                    display_name = f"{name} ●"
                    tooltip = "Published to registry - local modifications"
                    color = QColor(221, 107, 32)  # Orange
                else:
                    display_name = name
                    tooltip = "Local tool - not published"
                    color = None

                tool_item = QTreeWidgetItem([display_name])
                tool_item.setData(0, Qt.UserRole, name)

                if color:
                    tool_item.setForeground(0, QBrush(color))

                # Build tooltip
                if tool.source and tool.source.type == "imported":
                    tool_item.setToolTip(0, f"Imported from {tool.source.url or 'registry'}")
                    tooltip = f"Imported from {tool.source.url or 'registry'}"
                tool_item.setToolTip(0, tooltip)

                cat_item.addChild(tool_item)

            self.tool_tree.addTopLevelItem(cat_item)
@@ -224,6 +287,21 @@ class ToolsPage(QWidget):
        if tool.description:
            lines.append(f"<p style='color: #4a5568; margin-bottom: 16px;'>{tool.description}</p>")

        # Publish state
        state, registry_hash = get_tool_publish_state(tool.name)
        if state == "published":
            lines.append(
                "<p style='background: #c6f6d5; color: #276749; padding: 6px 10px; "
                "border-radius: 4px; margin-bottom: 12px; font-size: 12px;'>"
                "✓ Published to registry - up to date</p>"
            )
        elif state == "modified":
            lines.append(
                "<p style='background: #feebc8; color: #c05621; padding: 6px 10px; "
                "border-radius: 4px; margin-bottom: 12px; font-size: 12px;'>"
                "● Modified since last publish - republish to update registry</p>"
            )

        # Source info
        if tool.source:
            source_type = tool.source.type
@@ -0,0 +1,122 @@
"""Content hash utilities for tool integrity verification.

This module provides consistent SHA256 hashing for tool content,
used for:
- Publish state tracking (detect local modifications)
- Download integrity verification
- Registry content verification
"""

import hashlib
from typing import Optional, Dict, Any
import yaml


def compute_config_hash(config: Dict[str, Any], exclude_fields: Optional[list] = None) -> str:
    """Compute SHA256 hash of tool configuration.

    The hash is computed from a normalized YAML representation to ensure
    consistent hashing regardless of field order or formatting.

    Args:
        config: Tool configuration dictionary
        exclude_fields: Fields to exclude from hashing (e.g., 'published_hash', 'registry_hash')

    Returns:
        Hash string in format "sha256:<64-char-hex>"
    """
    if exclude_fields is None:
        exclude_fields = ['published_hash', 'registry_hash']

    # Create a copy without excluded fields
    config_copy = {k: v for k, v in config.items() if k not in exclude_fields}

    # Normalize to YAML with sorted keys for consistent ordering
    normalized = yaml.dump(config_copy, sort_keys=True, default_flow_style=False)

    # Compute SHA256
    hash_bytes = hashlib.sha256(normalized.encode('utf-8')).hexdigest()

    return f"sha256:{hash_bytes}"


def compute_yaml_hash(yaml_content: str, exclude_fields: Optional[list] = None) -> str:
    """Compute SHA256 hash of YAML content string.

    Parses the YAML, normalizes it, and computes hash.
    Useful for hashing raw config.yaml content.

    Args:
        yaml_content: Raw YAML string
        exclude_fields: Fields to exclude from hashing

    Returns:
        Hash string in format "sha256:<64-char-hex>"
    """
    config = yaml.safe_load(yaml_content)
    if config is None:
        config = {}
    return compute_config_hash(config, exclude_fields)


def compute_file_hash(file_path: str) -> str:
    """Compute SHA256 hash of a file's contents.

    Used for hashing pattern files (e.g., Fabric's system.md).

    Args:
        file_path: Path to file

    Returns:
        Hash string in format "sha256:<64-char-hex>"
    """
    with open(file_path, 'rb') as f:
        hash_bytes = hashlib.sha256(f.read()).hexdigest()
    return f"sha256:{hash_bytes}"


def verify_hash(content: str, expected_hash: str) -> bool:
    """Verify content matches expected hash.

    Args:
        content: Content to verify (YAML string)
        expected_hash: Expected hash in format "sha256:<hex>"

    Returns:
        True if hash matches, False otherwise
    """
    if not expected_hash or not expected_hash.startswith("sha256:"):
        return False

    computed = compute_yaml_hash(content)
    return computed == expected_hash


def extract_hash_value(hash_string: str) -> Optional[str]:
    """Extract the hex value from a hash string.

    Args:
        hash_string: Hash in format "sha256:<hex>"

    Returns:
        The hex value without prefix, or None if invalid
    """
    if not hash_string or not hash_string.startswith("sha256:"):
        return None
    return hash_string[7:]  # Remove "sha256:" prefix


def short_hash(hash_string: str, length: int = 8) -> str:
    """Get a shortened version of a hash for display.

    Args:
        hash_string: Full hash string
        length: Number of hex chars to include

    Returns:
        Shortened hash (e.g., "sha256:abc123...")
    """
    hex_value = extract_hash_value(hash_string)
    if hex_value:
        return f"sha256:{hex_value[:length]}..."
    return hash_string[:length + 10] + "..."
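A quick sketch exercising the new hash utilities defined above; the `cmdforge.hash_utils` import path is taken from how this diff imports the module elsewhere, while the sample dicts and YAML are invented.

```python
from cmdforge.hash_utils import compute_config_hash, compute_yaml_hash, verify_hash, short_hash

a = {"name": "demo", "version": "1.0.0", "description": "example"}
b = {"description": "example", "name": "demo", "version": "1.0.0",
     "registry_hash": "sha256:ignored"}

# Key order and the registry_hash field do not affect the hash, so a freshly
# downloaded tool compares equal to its registry copy until it is edited.
assert compute_config_hash(a) == compute_config_hash(b)

raw = "name: demo\nversion: 1.0.0\ndescription: example\n"
h = compute_yaml_hash(raw)
print(short_hash(h))                    # e.g. "sha256:ab12cd34..."
print(verify_hash(raw, h))              # True
print(verify_hash(raw + "x: 1\n", h))   # False - content changed
```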
@@ -21,6 +21,7 @@ from argon2.exceptions import VerifyMismatchError
from .db import connect_db, init_db, query_all, query_one
from .rate_limit import RateLimiter
from .sync import process_webhook, get_categories_cache_path, get_repo_dir
from ..hash_utils import compute_yaml_hash
from .stats import (
    refresh_tool_stats, get_tool_stats, refresh_publisher_stats,
    get_publisher_stats, track_tool_usage, calculate_badges, BADGES,
@@ -56,7 +57,7 @@ ALLOWED_SORT = {
    "/categories": {"name", "tool_count"},
}

TOOL_NAME_RE = re.compile(r"^[A-Za-z0-9-]{1,64}$")
TOOL_NAME_RE = re.compile(r"^[A-Za-z0-9_-]{1,64}$")
OWNER_RE = re.compile(r"^[a-z0-9][a-z0-9-]{0,37}[a-z0-9]$")
EMAIL_RE = re.compile(r"^[^@\s]+@[^@\s]+\.[^@\s]+$")
RESERVED_SLUGS = {"official", "admin", "system", "api", "registry", "cmdforge"}
@@ -996,6 +997,7 @@ def create_app() -> Flask:
                "resolved_version": row["version"],
                "config": row["config_yaml"],
                "readme": row["readme"] or "",
                "config_hash": row.get("config_hash") or "",
            }
        })
        response.headers["Cache-Control"] = "max-age=3600, immutable"
@@ -1016,10 +1018,12 @@ def create_app() -> Flask:
        categories_payload = yaml.safe_load(categories_yaml.read_text(encoding="utf-8")) or {}
        predefined_categories = (categories_payload or {}).get("categories", [])

        # Get counts for all categories in the database
        # Get counts for all categories in the database (filtered by visibility)
        vis_filter, vis_params = build_visibility_filter()
        counts = query_all(
            g.db,
            "SELECT category, COUNT(DISTINCT owner || '/' || name) AS total FROM tools GROUP BY category",
            f"SELECT category, COUNT(DISTINCT owner || '/' || name) AS total FROM tools WHERE 1=1 {vis_filter} GROUP BY category",
            vis_params,
        )
        count_map = {row["category"]: row["total"] for row in counts}
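For readers unfamiliar with the `build_visibility_filter` helper: it is not shown in this diff, but from its call sites it appears to return an SQL fragment plus bound parameters that are appended after `WHERE 1=1`. The sketch below is purely illustrative of that composition, with an assumed implementation.

```python
# Hypothetical sketch only - build_visibility_filter's real body is not in this diff.
def build_visibility_filter(alias: str = "tools"):
    # Assumed shape, matching the "WHERE 1=1 {vis_filter}" usage above.
    return (
        f" AND {alias}.visibility = ? AND {alias}.moderation_status = ?",
        ["public", "approved"],
    )

vis_filter, vis_params = build_visibility_filter()
sql = f"SELECT category, COUNT(*) AS total FROM tools WHERE 1=1 {vis_filter} GROUP BY category"
# cursor.execute(sql, vis_params)  # parameters line up with the placeholders in the fragment
```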
@@ -1077,31 +1081,35 @@ def create_app() -> Flask:
        category = request.args.get("category")
        limit = min(int(request.args.get("limit", 100)), 500)

        # Build visibility filter
        vis_filter, vis_params = build_visibility_filter("tools")

        # Build query - extract tags from JSON array and count occurrences
        if category:
            rows = query_all(
                g.db,
                """
                f"""
                SELECT tag.value AS name, COUNT(DISTINCT tools.owner || '/' || tools.name) AS count
                FROM tools, json_each(tools.tags) AS tag
                WHERE tools.category = ?
                WHERE tools.category = ? {vis_filter}
                GROUP BY tag.value
                ORDER BY count DESC
                LIMIT ?
                """,
                (category, limit),
                [category] + vis_params + [limit],
            )
        else:
            rows = query_all(
                g.db,
                """
                f"""
                SELECT tag.value AS name, COUNT(DISTINCT tools.owner || '/' || tools.name) AS count
                FROM tools, json_each(tools.tags) AS tag
                WHERE 1=1 {vis_filter}
                GROUP BY tag.value
                ORDER BY count DESC
                LIMIT ?
                """,
                (limit,),
                vis_params + [limit],
            )

        data = [{"name": row["name"], "count": row["count"]} for row in rows]
@@ -1792,6 +1800,9 @@ def create_app() -> Flask:

        tags_json = json.dumps(tags)

        # Compute content hash for integrity verification
        config_hash = compute_yaml_hash(config_text)

        # Determine status based on scrutiny
        if scrutiny_report and scrutiny_report.get("decision") == "approve":
            scrutiny_status = "approved"
@@ -1816,8 +1827,8 @@ def create_app() -> Flask:
                owner, name, version, description, category, tags, config_yaml, readme,
                publisher_id, deprecated, deprecated_message, replacement, downloads,
                scrutiny_status, scrutiny_report, source, source_url, source_json,
                visibility, moderation_status, published_at
            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                config_hash, visibility, moderation_status, published_at
            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            """,
            [
                owner,
@@ -1838,6 +1849,7 @@ def create_app() -> Flask:
                source,
                source_url,
                source_json,
                config_hash,
                visibility,
                moderation_status,
                datetime.utcnow().isoformat(),
@@ -1980,6 +1992,90 @@ def create_app() -> Flask:

        return jsonify({"data": {"status": "updated"}})

    @app.route("/api/v1/me", methods=["POST"])
    @require_token
    def update_profile() -> Response:
        """Update current user's profile (POST version for web forms)."""
        data = request.get_json() or {}

        # Validate fields
        display_name = data.get("display_name", "").strip()
        bio = data.get("bio", "").strip() if data.get("bio") else None
        website = data.get("website", "").strip() if data.get("website") else None

        if display_name and len(display_name) > 100:
            return error_response("VALIDATION_ERROR", "Display name too long (max 100)", 400)
        if bio and len(bio) > 500:
            return error_response("VALIDATION_ERROR", "Bio too long (max 500)", 400)
        if website and len(website) > 200:
            return error_response("VALIDATION_ERROR", "Website URL too long (max 200)", 400)

        # Build update query
        updates = []
        params = []
        if display_name:
            updates.append("display_name = ?")
            params.append(display_name)
        if bio is not None:
            updates.append("bio = ?")
            params.append(bio)
        if website is not None:
            updates.append("website = ?")
            params.append(website)

        if not updates:
            return error_response("VALIDATION_ERROR", "No valid fields to update", 400)

        updates.append("updated_at = CURRENT_TIMESTAMP")
        params.append(g.current_publisher["id"])

        g.db.execute(
            f"UPDATE publishers SET {', '.join(updates)} WHERE id = ?",
            params,
        )
        g.db.commit()

        return jsonify({"data": {"status": "updated"}})

    @app.route("/api/v1/me/password", methods=["POST"])
    @require_token
    def change_password() -> Response:
        """Change current user's password."""
        data = request.get_json() or {}

        current_password = data.get("current_password", "")
        new_password = data.get("new_password", "")

        if not current_password or not new_password:
            return error_response("VALIDATION_ERROR", "Current and new password required", 400)

        if len(new_password) < 8:
            return error_response("VALIDATION_ERROR", "New password must be at least 8 characters", 400)

        # Verify current password
        publisher = query_one(
            g.db,
            "SELECT password_hash FROM publishers WHERE id = ?",
            [g.current_publisher["id"]],
        )
        if not publisher:
            return error_response("NOT_FOUND", "Publisher not found", 404)

        try:
            password_hasher.verify(publisher["password_hash"], current_password)
        except VerifyMismatchError:
            return error_response("INVALID_PASSWORD", "Current password is incorrect", 400)

        # Hash and save new password
        new_hash = password_hasher.hash(new_password)
        g.db.execute(
            "UPDATE publishers SET password_hash = ?, updated_at = CURRENT_TIMESTAMP WHERE id = ?",
            [new_hash, g.current_publisher["id"]],
        )
        g.db.commit()

        return jsonify({"data": {"status": "password_changed"}})

    @app.route("/api/v1/featured/tools", methods=["GET"])
    def featured_tools() -> Response:
        """Get featured tools for homepage/landing."""
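A hedged client-side sketch of the two new endpoints. The routes and JSON fields come from the diff above; the base URL, token value, and Bearer auth scheme are assumptions.

```python
import requests

BASE = "http://localhost:5000"                    # assumed registry address
HEADERS = {"Authorization": "Bearer <token>"}     # require_token; exact scheme assumed

# Update profile (any subset of the validated fields)
requests.post(f"{BASE}/api/v1/me", headers=HEADERS, json={
    "display_name": "Jane Doe",
    "bio": "Tool author",
    "website": "https://example.com",
})

# Change password (the server enforces a minimum of 8 characters)
requests.post(f"{BASE}/api/v1/me/password", headers=HEADERS, json={
    "current_password": "old-secret",
    "new_password": "new-secret-123",
})
```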
@@ -2303,6 +2399,14 @@ def create_app() -> Flask:

        data = []
        for row in rows:
            # Parse scrutiny report if available
            scrutiny_report = None
            if row["scrutiny_report"]:
                try:
                    scrutiny_report = json.loads(row["scrutiny_report"])
                except (json.JSONDecodeError, TypeError):
                    pass

            data.append({
                "id": row["id"],
                "owner": row["owner"],
@@ -2313,6 +2417,8 @@ def create_app() -> Flask:
                "published_at": row["published_at"],
                "publisher_name": row["publisher_name"],
                "visibility": row["visibility"],
                "scrutiny_status": row["scrutiny_status"],
                "scrutiny_report": scrutiny_report,
            })

        return jsonify({
@@ -2382,6 +2488,92 @@ def create_app() -> Flask:

        return jsonify({"data": {"status": "rejected", "tool_id": tool_id}})

    @app.route("/api/v1/admin/scrutiny", methods=["GET"])
    @require_moderator
    def admin_scrutiny_audit() -> Response:
        """List all tools with their scrutiny status for audit purposes."""
        page = request.args.get("page", 1, type=int)
        per_page = min(request.args.get("per_page", 50, type=int), 100)
        offset = (page - 1) * per_page

        # Filter options
        scrutiny_filter = request.args.get("scrutiny_status")  # approved, pending_review, pending
        moderation_filter = request.args.get("moderation_status")  # approved, pending, rejected

        where_clauses = []
        params = []

        if scrutiny_filter:
            where_clauses.append("t.scrutiny_status = ?")
            params.append(scrutiny_filter)
        if moderation_filter:
            where_clauses.append("t.moderation_status = ?")
            params.append(moderation_filter)

        where_sql = " AND ".join(where_clauses) if where_clauses else "1=1"

        rows = query_all(
            g.db,
            f"""
            SELECT t.id, t.owner, t.name, t.version, t.description, t.category,
                   t.scrutiny_status, t.scrutiny_report, t.moderation_status,
                   t.moderation_note, t.published_at, p.display_name as publisher_name
            FROM tools t
            JOIN publishers p ON t.publisher_id = p.id
            WHERE {where_sql}
            ORDER BY t.published_at DESC
            LIMIT ? OFFSET ?
            """,
            params + [per_page, offset],
        )

        count_row = query_one(
            g.db,
            f"SELECT COUNT(*) as total FROM tools t WHERE {where_sql}",
            params,
        )
        total = count_row["total"] if count_row else 0

        data = []
        for row in rows:
            scrutiny_report = None
            if row["scrutiny_report"]:
                try:
                    scrutiny_report = json.loads(row["scrutiny_report"])
                except (json.JSONDecodeError, TypeError):
                    pass

            data.append({
                "id": row["id"],
                "owner": row["owner"],
                "name": row["name"],
                "version": row["version"],
                "description": row["description"],
                "category": row["category"],
                "published_at": row["published_at"],
                "publisher_name": row["publisher_name"],
                "scrutiny_status": row["scrutiny_status"],
                "scrutiny_report": scrutiny_report,
                "moderation_status": row["moderation_status"],
                "moderation_note": row["moderation_note"],
            })

        # Also get summary stats
        stats = query_all(
            g.db,
            """
            SELECT scrutiny_status, moderation_status, COUNT(*) as count
            FROM tools
            GROUP BY scrutiny_status, moderation_status
            """,
        )

        return jsonify({
            "data": data,
            "meta": paginate(page, per_page, total),
            "stats": [dict(s) for s in stats],
        })

    @app.route("/api/v1/admin/tools/<int:tool_id>/remove", methods=["POST"])
    @require_moderator
    def admin_remove_tool(tool_id: int) -> Response:
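A hedged sketch of calling the new audit endpoint. The route, query parameters, and payload keys come from the handler above; the base URL, moderator token, and Bearer auth scheme are assumptions.

```python
import requests

resp = requests.get(
    "http://localhost:5000/api/v1/admin/scrutiny",          # assumed registry address
    headers={"Authorization": "Bearer <moderator-token>"},  # assumed auth scheme
    params={"page": 1, "per_page": 50,
            "scrutiny_status": "pending_review", "moderation_status": "pending"},
)
payload = resp.json()
for tool in payload["data"]:
    report = tool.get("scrutiny_report") or {}
    warnings = [f for f in report.get("findings", []) if f.get("result") == "warning"]
    print(tool["owner"], tool["name"], tool["scrutiny_status"], f"{len(warnings)} warnings")
```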
@@ -59,6 +59,7 @@ CREATE TABLE IF NOT EXISTS tools (
    source TEXT,
    source_url TEXT,
    source_json TEXT,
    config_hash TEXT,
    visibility TEXT DEFAULT 'public',
    moderation_status TEXT DEFAULT 'pending',
    moderation_note TEXT,
@@ -448,6 +449,7 @@ def migrate_db(conn: sqlite3.Connection) -> None:
        ("source", "TEXT", "NULL"),
        ("source_url", "TEXT", "NULL"),
        ("source_json", "TEXT", "NULL"),
        ("config_hash", "TEXT", "NULL"),
        ("visibility", "TEXT", "'public'"),
        ("moderation_status", "TEXT", "'pending'"),
        ("moderation_note", "TEXT", "NULL"),
@@ -497,12 +499,13 @@ def migrate_db(conn: sqlite3.Connection) -> None:
        pass

    # Grandfather existing tools: set moderation_status to 'approved' for tools that have NULL
    # This ensures existing tools remain visible after migration
    # This ensures existing tools remain visible after migration (one-time migration)
    # Note: Only applies to NULL, NOT to 'pending' - pending tools need manual review
    try:
        conn.execute("""
            UPDATE tools
            SET moderation_status = 'approved'
            WHERE moderation_status IS NULL OR moderation_status = 'pending'
            WHERE moderation_status IS NULL
        """)
        conn.commit()
    except sqlite3.OperationalError:
@@ -512,6 +515,7 @@ def migrate_db(conn: sqlite3.Connection) -> None:
    try:
        conn.execute("CREATE INDEX IF NOT EXISTS idx_tools_owner ON tools(owner)")
        conn.execute("CREATE INDEX IF NOT EXISTS idx_tools_moderation ON tools(moderation_status, visibility)")
        conn.execute("CREATE INDEX IF NOT EXISTS idx_tools_hash ON tools(config_hash)")
        conn.execute("CREATE INDEX IF NOT EXISTS idx_publishers_role ON publishers(role)")
        conn.execute("CREATE INDEX IF NOT EXISTS idx_publishers_banned ON publishers(banned)")
        conn.execute("CREATE INDEX IF NOT EXISTS idx_audit_log_target ON audit_log(target_type, target_id)")
@@ -112,6 +112,7 @@ class DownloadResult:
    resolved_version: str
    config_yaml: str
    readme: str = ""
    config_hash: str = ""  # Registry hash for integrity verification


class RegistryClient:
@@ -544,7 +545,8 @@ class RegistryClient:
            name=data.get("name", name),
            resolved_version=data.get("resolved_version", ""),
            config_yaml=data.get("config", ""),
            readme=data.get("readme", "")
            readme=data.get("readme", ""),
            config_hash=data.get("config_hash", "")
        )

    def get_categories(self) -> List[Dict[str, Any]]:
@@ -352,7 +352,8 @@ class ToolResolver:
            name=result.name,
            version=result.resolved_version,
            config_yaml=result.config_yaml,
            readme=result.readme
            readme=result.readme,
            config_hash=result.config_hash
        )

        if self.verbose:
@@ -379,13 +380,30 @@ class ToolResolver:
        name: str,
        version: str,
        config_yaml: str,
        readme: str = ""
        readme: str = "",
        config_hash: str = ""
    ) -> ResolvedTool:
        """Install a tool fetched from registry to global directory."""
        # Verify hash if provided
        if config_hash:
            from .hash_utils import compute_yaml_hash
            computed_hash = compute_yaml_hash(config_yaml)
            if computed_hash != config_hash:
                raise RuntimeError(
                    f"Hash mismatch for {owner}/{name}: expected {config_hash[:20]}..., "
                    f"got {computed_hash[:20]}... - content may have been tampered with"
                )

        # Create directory structure
        tool_dir = TOOLS_DIR / owner / name
        tool_dir.mkdir(parents=True, exist_ok=True)

        # Add registry_hash to config so we can track publish state
        if config_hash:
            parsed_config = yaml.safe_load(config_yaml)
            parsed_config["registry_hash"] = config_hash
            config_yaml = yaml.dump(parsed_config, default_flow_style=False, sort_keys=False)

        # Write config
        config_path = tool_dir / "config.yaml"
        config_path.write_text(config_yaml)
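A minimal sketch of the integrity check performed above, using the functions from the new hash module (import path assumed to be `cmdforge.hash_utils`); the YAML snippets are invented.

```python
from cmdforge.hash_utils import compute_yaml_hash

registry_yaml = "name: demo\nversion: 1.0.0\n"
config_hash = compute_yaml_hash(registry_yaml)        # what the registry would report

tampered_yaml = "name: demo\nversion: 1.0.0\nsteps: []\n"
if compute_yaml_hash(tampered_yaml) != config_hash:
    print("hash mismatch - refuse to install")        # mirrors the RuntimeError path above

# Note: registry_hash is in compute_config_hash's default exclude list, so stamping it
# into the installed config.yaml (as the code above does) does not change the hash.
```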
@@ -593,7 +611,8 @@ def install_from_registry(spec: str, version: Optional[str] = None) -> ResolvedT
        name=result.name,
        version=result.resolved_version,
        config_yaml=result.config_yaml,
        readme=result.readme
        readme=result.readme,
        config_hash=result.config_hash
    )
@@ -233,30 +233,68 @@ def tools():
    return _render_tools()


def _load_tags() -> List[SimpleNamespace]:
    """Load all tags with counts."""
    status, payload = _api_get("/api/v1/tags", params={"per_page": 100})
    if status != 200:
        return []
    tags = []
    for item in payload.get("data", []):
        name = item.get("name")
        if not name:
            continue
        tags.append(SimpleNamespace(
            name=name,
            count=item.get("count", 0),
        ))
    return tags


def _render_tools(category_override: Optional[str] = None):
    page = request.args.get("page", 1)
    sort = request.args.get("sort", "downloads")
    category = category_override or request.args.get("category")
    query = request.args.get("q")

    # Parse tag filter parameters (include/exclude)
    include_tags_param = request.args.get("include_tags", "")
    exclude_tags_param = request.args.get("exclude_tags", "")
    include_tags = [t.strip() for t in include_tags_param.split(",") if t.strip()]
    exclude_tags = [t.strip() for t in exclude_tags_param.split(",") if t.strip()]

    params = {"page": page, "per_page": 12, "sort": sort}
    if category:
        params["category"] = category
    if include_tags:
        params["tags"] = ",".join(include_tags)

    status, payload = _api_get("/api/v1/tools", params=params)
    if status != 200:
        return render_template("errors/500.html"), 500

    # Filter out excluded tags client-side (API doesn't support exclude)
    tools = payload.get("data", [])
    if exclude_tags:
        def has_excluded_tag(tool):
            tool_tags = tool.get("tags", [])
            return any(t in tool_tags for t in exclude_tags)
        tools = [t for t in tools if not has_excluded_tag(t)]

    meta = payload.get("meta", {})
    categories, all_tools_total = _load_categories()
    all_tags = _load_tags()
    return render_template(
        "pages/tools.html",
        tools=payload.get("data", []),
        tools=tools,
        categories=categories,
        current_category=category,
        total_count=all_tools_total,
        sort=sort,
        query=query,
        pagination=_build_pagination(meta),
        all_tags=all_tags,
        include_tags=include_tags,
        exclude_tags=exclude_tags,
    )
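A small sketch of the client-side exclude filter used in `_render_tools` above, run against a hypothetical API payload.

```python
# Hypothetical tool payload; the tag values are placeholders.
tools = [
    {"name": "summarize", "tags": ["fabric", "imported"]},
    {"name": "review-code", "tags": ["code"]},
]
exclude_tags = ["imported"]

def has_excluded_tag(tool):
    return any(t in tool.get("tags", []) for t in exclude_tags)

visible = [t for t in tools if not has_excluded_tag(t)]
print([t["name"] for t in visible])  # ['review-code'] - excluded tags filtered locally
```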
@@ -625,13 +663,56 @@ def dashboard_tokens():
    )


@web_bp.route("/dashboard/settings", endpoint="dashboard_settings")
@web_bp.route("/dashboard/settings", endpoint="dashboard_settings", methods=["GET", "POST"])
def dashboard_settings():
    redirect_response = _require_login()
    if redirect_response:
        return redirect_response
    token = session.get("auth_token")
    user = _load_current_publisher() or session.get("user", {})
    errors = []
    success_message = None

    if request.method == "POST":
        form_type = request.form.get("form")

        if form_type == "profile":
            # Update profile
            data = {
                "display_name": request.form.get("display_name", "").strip(),
                "bio": request.form.get("bio", "").strip(),
                "website": request.form.get("website", "").strip(),
            }
            status, payload = _api_post("/api/v1/me", data=data, token=token)
            if status == 200:
                success_message = "Profile updated successfully."
                user = _load_current_publisher() or session.get("user", {})
            else:
                errors.append(payload.get("error", {}).get("message", "Failed to update profile."))

        elif form_type == "password":
            # Change password
            current_password = request.form.get("current_password", "")
            new_password = request.form.get("new_password", "")
            confirm_password = request.form.get("confirm_password", "")

            if not current_password or not new_password:
                errors.append("Please fill in all password fields.")
            elif new_password != confirm_password:
                errors.append("New passwords do not match.")
            elif len(new_password) < 8:
                errors.append("New password must be at least 8 characters.")
            else:
                data = {
                    "current_password": current_password,
                    "new_password": new_password,
                }
                status, payload = _api_post("/api/v1/me/password", data=data, token=token)
                if status == 200:
                    success_message = "Password updated successfully."
                else:
                    errors.append(payload.get("error", {}).get("message", "Failed to update password."))

    tools_status, tools_payload = _api_get("/api/v1/me/tools", token=token)
    tools = tools_payload.get("data", []) if tools_status == 200 else []
    token_status, token_payload = _api_get("/api/v1/tokens", token=token)
@@ -647,8 +728,8 @@ def dashboard_settings():
        tools=tools,
        stats=stats,
        tokens=tokens,
        errors=[],
        success_message=None,
        errors=errors,
        success_message=success_message,
    )
@@ -803,6 +884,8 @@ def sitemap():
            SELECT owner, name, MAX(id) AS max_id
            FROM tools
            WHERE version NOT LIKE '%-%'
              AND visibility = 'public'
              AND moderation_status = 'approved'
            GROUP BY owner, name
        )
        SELECT t.owner, t.name, t.published_at
@@ -1132,3 +1215,71 @@ def admin_settings():
        categories=categories,
        token=token,
    )


@web_bp.route("/dashboard/admin/scrutiny", endpoint="admin_scrutiny")
def admin_scrutiny():
    """Scrutiny audit page - view all tool scrutiny results."""
    forbidden = _require_moderator_role()
    if forbidden:
        return forbidden

    user = _load_current_publisher()
    token = session.get("auth_token")
    page = request.args.get("page", 1, type=int)
    scrutiny_filter = request.args.get("scrutiny_status", "")
    moderation_filter = request.args.get("moderation_status", "")

    params = {"page": page, "per_page": 50}
    if scrutiny_filter:
        params["scrutiny_status"] = scrutiny_filter
    if moderation_filter:
        params["moderation_status"] = moderation_filter

    status, payload = _api_get("/api/v1/admin/scrutiny", params=params, token=token)

    if status != 200:
        return render_template(
            "admin/scrutiny.html",
            user=user,
            active_page="admin_scrutiny",
            tools=[],
            meta={},
            stats_summary={},
            scrutiny_filter=scrutiny_filter,
            moderation_filter=moderation_filter,
            error=payload.get("error", "Failed to load scrutiny data"),
        )

    # Process stats into easy-to-use summary
    stats = payload.get("stats", [])
    stats_summary = {
        "approved_approved": 0,
        "review_pending": 0,
        "review_approved": 0,
        "total_pending": 0,
    }
    for s in stats:
        scrutiny = s.get("scrutiny_status")
        moderation = s.get("moderation_status")
        count = s.get("count", 0)

        if scrutiny == "approved" and moderation == "approved":
            stats_summary["approved_approved"] += count
        if scrutiny == "pending_review" and moderation == "pending":
            stats_summary["review_pending"] += count
        if scrutiny == "pending_review" and moderation == "approved":
            stats_summary["review_approved"] += count
        if moderation == "pending":
            stats_summary["total_pending"] += count

    return render_template(
        "admin/scrutiny.html",
        user=user,
        active_page="admin_scrutiny",
        tools=payload.get("data", []),
        meta=payload.get("meta", {}),
        stats_summary=stats_summary,
        scrutiny_filter=scrutiny_filter,
        moderation_filter=moderation_filter,
    )
@@ -26,12 +26,13 @@ def generate_sitemap() -> str:

    conn = connect_db()
    try:
        rows = query_all(conn, "SELECT DISTINCT owner, name FROM tools")
        # Only include approved public tools in sitemap
        rows = query_all(conn, "SELECT DISTINCT owner, name FROM tools WHERE visibility = 'public' AND moderation_status = 'approved'")
        for row in rows:
            tool_path = url_for("web.tool_detail", owner=row["owner"], name=row["name"], _external=True)
            urls.append(_url_entry(tool_path, "daily", "0.9"))

        categories = query_all(conn, "SELECT DISTINCT category FROM tools WHERE category IS NOT NULL")
        categories = query_all(conn, "SELECT DISTINCT category FROM tools WHERE category IS NOT NULL AND visibility = 'public' AND moderation_status = 'approved'")
        for row in categories:
            cat_path = url_for("web.category", name=row["category"], _external=True)
            urls.append(_url_entry(cat_path, "weekly", "0.7"))
@@ -83,6 +83,12 @@
                </svg>
                <span class="text-sm font-medium text-gray-700">Manage publishers</span>
            </a>
            <a href="{{ url_for('web.admin_scrutiny') }}" class="flex items-center p-4 bg-gray-50 rounded-lg hover:bg-gray-100 transition-colors">
                <svg class="h-5 w-5 text-gray-400 mr-3" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                    <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9 12l2 2 4-4m5.618-4.016A11.955 11.955 0 0112 2.944a11.955 11.955 0 01-8.618 3.04A12.02 12.02 0 003 9c0 5.591 3.824 10.29 9 11.622 5.176-1.332 9-6.03 9-11.622 0-1.042-.133-2.052-.382-3.016z"/>
                </svg>
                <span class="text-sm font-medium text-gray-700">Scrutiny audit</span>
            </a>
            {% if user.role == 'admin' %}
            <a href="{{ url_for('web.admin_settings') }}" class="flex items-center p-4 bg-gray-50 rounded-lg hover:bg-gray-100 transition-colors">
                <svg class="h-5 w-5 text-gray-400 mr-3" fill="none" stroke="currentColor" viewBox="0 0 24 24">
@@ -19,7 +19,7 @@
        <tr>
            <th scope="col" class="px-6 py-3 text-left text-xs font-medium text-gray-500 uppercase tracking-wider">Tool</th>
            <th scope="col" class="px-6 py-3 text-left text-xs font-medium text-gray-500 uppercase tracking-wider">Publisher</th>
            <th scope="col" class="px-6 py-3 text-left text-xs font-medium text-gray-500 uppercase tracking-wider">Category</th>
            <th scope="col" class="px-6 py-3 text-left text-xs font-medium text-gray-500 uppercase tracking-wider">Scrutiny</th>
            <th scope="col" class="px-6 py-3 text-left text-xs font-medium text-gray-500 uppercase tracking-wider">Submitted</th>
            <th scope="col" class="px-6 py-3 text-right text-xs font-medium text-gray-500 uppercase tracking-wider">Actions</th>
        </tr>
@@ -37,11 +37,40 @@
            </td>
            <td class="px-6 py-4 whitespace-nowrap">
                <div class="text-sm text-gray-900">{{ tool.publisher_name }}</div>
                <div class="text-xs text-gray-500">{{ tool.category or 'Uncategorized' }}</div>
            </td>
            <td class="px-6 py-4 whitespace-nowrap">
                <span class="px-2 inline-flex text-xs leading-5 font-semibold rounded-full bg-gray-100 text-gray-800">
                    {{ tool.category or 'Uncategorized' }}
            <td class="px-6 py-4">
                {% if tool.scrutiny_status == 'approved' %}
                <span class="px-2 inline-flex text-xs leading-5 font-semibold rounded-full bg-green-100 text-green-800">
                    Passed
                </span>
                {% elif tool.scrutiny_status == 'pending_review' %}
                <span class="px-2 inline-flex text-xs leading-5 font-semibold rounded-full bg-yellow-100 text-yellow-800">
                    Warnings
                </span>
                {% else %}
                <span class="px-2 inline-flex text-xs leading-5 font-semibold rounded-full bg-gray-100 text-gray-800">
                    {{ tool.scrutiny_status or 'N/A' }}
                </span>
                {% endif %}
                {% if tool.scrutiny_report and tool.scrutiny_report.findings %}
                <button onclick="toggleFindings({{ tool.id }})" class="ml-2 text-xs text-indigo-600 hover:text-indigo-800">
                    ({{ tool.scrutiny_report.findings | selectattr('result', 'equalto', 'warning') | list | length }} warnings)
                </button>
                <div id="findings-{{ tool.id }}" class="hidden mt-2 text-xs space-y-1 max-w-md">
                    {% for finding in tool.scrutiny_report.findings %}
                    {% if finding.result == 'warning' %}
                    <div class="p-2 rounded bg-yellow-50">
                        <span class="font-medium">{{ finding.check }}:</span>
                        <span class="text-yellow-700">{{ finding.message }}</span>
                        {% if finding.suggestion %}
                        <div class="text-gray-500 italic text-xs">{{ finding.suggestion }}</div>
                        {% endif %}
                    </div>
                    {% endif %}
                    {% endfor %}
                </div>
                {% endif %}
            </td>
            <td class="px-6 py-4 whitespace-nowrap text-sm text-gray-500">
                {{ tool.published_at[:10] if tool.published_at else 'Unknown' }}
@@ -100,6 +129,11 @@
<script>
let currentToolId = null;

function toggleFindings(toolId) {
    const el = document.getElementById(`findings-${toolId}`);
    el.classList.toggle('hidden');
}

async function approveTool(toolId) {
    if (!confirm('Approve this tool?')) return;
    try {
@ -0,0 +1,179 @@
{% extends "dashboard/base.html" %}

{% block dashboard_header %}
<div class="flex items-center justify-between">
  <div>
    <h1 class="text-2xl font-bold text-gray-900">Scrutiny Audit</h1>
    <p class="mt-1 text-gray-600">Review automated scrutiny results for all tools</p>
  </div>
  <a href="{{ url_for('web.admin_dashboard') }}" class="text-sm text-indigo-600 hover:text-indigo-700">← Back to Admin</a>
</div>
{% endblock %}

{% block dashboard_content %}
<!-- Stats Summary -->
<div class="mb-6 grid grid-cols-2 md:grid-cols-4 gap-4">
  <div class="bg-white rounded-lg border border-gray-200 p-4">
    <div class="text-sm text-gray-500">Auto-Approved</div>
    <div class="text-2xl font-semibold text-green-600">{{ stats_summary.approved_approved or 0 }}</div>
    <div class="text-xs text-gray-400">Scrutiny passed</div>
  </div>
  <div class="bg-white rounded-lg border border-gray-200 p-4">
    <div class="text-sm text-gray-500">Needs Review</div>
    <div class="text-2xl font-semibold text-yellow-600">{{ stats_summary.review_pending or 0 }}</div>
    <div class="text-xs text-gray-400">Has warnings</div>
  </div>
  <div class="bg-white rounded-lg border border-gray-200 p-4">
    <div class="text-sm text-gray-500">Manually Approved</div>
    <div class="text-2xl font-semibold text-blue-600">{{ stats_summary.review_approved or 0 }}</div>
    <div class="text-xs text-gray-400">With warnings</div>
  </div>
  <div class="bg-white rounded-lg border border-gray-200 p-4">
    <div class="text-sm text-gray-500">Total Pending</div>
    <div class="text-2xl font-semibold text-gray-600">{{ stats_summary.total_pending or 0 }}</div>
    <div class="text-xs text-gray-400">Awaiting review</div>
  </div>
</div>

<!-- Filters -->
<div class="mb-4 flex flex-wrap gap-4">
  <form method="get" class="flex items-center gap-4">
    <select name="scrutiny_status" class="px-3 py-2 border border-gray-300 rounded-md text-sm">
      <option value="">All Scrutiny Status</option>
      <option value="approved" {% if scrutiny_filter == 'approved' %}selected{% endif %}>Approved</option>
      <option value="pending_review" {% if scrutiny_filter == 'pending_review' %}selected{% endif %}>Pending Review (Warnings)</option>
      <option value="pending" {% if scrutiny_filter == 'pending' %}selected{% endif %}>Pending</option>
    </select>
    <select name="moderation_status" class="px-3 py-2 border border-gray-300 rounded-md text-sm">
      <option value="">All Moderation Status</option>
      <option value="approved" {% if moderation_filter == 'approved' %}selected{% endif %}>Approved</option>
      <option value="pending" {% if moderation_filter == 'pending' %}selected{% endif %}>Pending</option>
      <option value="rejected" {% if moderation_filter == 'rejected' %}selected{% endif %}>Rejected</option>
    </select>
    <button type="submit" class="px-4 py-2 bg-indigo-600 text-white text-sm font-medium rounded-md hover:bg-indigo-700">Filter</button>
    {% if scrutiny_filter or moderation_filter %}
    <a href="{{ url_for('web.admin_scrutiny') }}" class="text-sm text-gray-500 hover:text-gray-700">Clear</a>
    {% endif %}
  </form>
</div>

<div class="bg-white rounded-lg border border-gray-200">
  {% if tools %}
  <div class="overflow-x-auto">
    <table class="min-w-full divide-y divide-gray-200">
      <thead class="bg-gray-50">
        <tr>
          <th scope="col" class="px-4 py-3 text-left text-xs font-medium text-gray-500 uppercase tracking-wider">Tool</th>
          <th scope="col" class="px-4 py-3 text-left text-xs font-medium text-gray-500 uppercase tracking-wider">Scrutiny</th>
          <th scope="col" class="px-4 py-3 text-left text-xs font-medium text-gray-500 uppercase tracking-wider">Moderation</th>
          <th scope="col" class="px-4 py-3 text-left text-xs font-medium text-gray-500 uppercase tracking-wider">Findings</th>
          <th scope="col" class="px-4 py-3 text-left text-xs font-medium text-gray-500 uppercase tracking-wider">Published</th>
        </tr>
      </thead>
      <tbody class="bg-white divide-y divide-gray-200">
        {% for tool in tools %}
        <tr class="hover:bg-gray-50">
          <td class="px-4 py-4">
            <div class="text-sm font-medium text-gray-900">
              <a href="{{ url_for('web.tool_detail', owner=tool.owner, name=tool.name) }}" class="hover:text-indigo-600">
                {{ tool.owner }}/{{ tool.name }}
              </a>
            </div>
            <div class="text-xs text-gray-500">v{{ tool.version }} - {{ tool.category or 'Uncategorized' }}</div>
          </td>
          <td class="px-4 py-4 whitespace-nowrap">
            {% if tool.scrutiny_status == 'approved' %}
            <span class="px-2 inline-flex text-xs leading-5 font-semibold rounded-full bg-green-100 text-green-800">
              Passed
            </span>
            {% elif tool.scrutiny_status == 'pending_review' %}
            <span class="px-2 inline-flex text-xs leading-5 font-semibold rounded-full bg-yellow-100 text-yellow-800">
              Warnings
            </span>
            {% else %}
            <span class="px-2 inline-flex text-xs leading-5 font-semibold rounded-full bg-gray-100 text-gray-800">
              {{ tool.scrutiny_status or 'N/A' }}
            </span>
            {% endif %}
          </td>
          <td class="px-4 py-4 whitespace-nowrap">
            {% if tool.moderation_status == 'approved' %}
            <span class="px-2 inline-flex text-xs leading-5 font-semibold rounded-full bg-green-100 text-green-800">
              Approved
            </span>
            {% elif tool.moderation_status == 'pending' %}
            <span class="px-2 inline-flex text-xs leading-5 font-semibold rounded-full bg-yellow-100 text-yellow-800">
              Pending
            </span>
            {% elif tool.moderation_status == 'rejected' %}
            <span class="px-2 inline-flex text-xs leading-5 font-semibold rounded-full bg-red-100 text-red-800">
              Rejected
            </span>
            {% endif %}
          </td>
          <td class="px-4 py-4">
            {% if tool.scrutiny_report and tool.scrutiny_report.findings %}
            <button onclick="showFindings({{ tool.id }})" class="text-sm text-indigo-600 hover:text-indigo-800">
              View {{ tool.scrutiny_report.findings | length }} finding(s)
            </button>
            <div id="findings-{{ tool.id }}" class="hidden mt-2 text-xs space-y-1">
              {% for finding in tool.scrutiny_report.findings %}
              <div class="p-2 rounded {% if finding.result == 'pass' %}bg-green-50{% elif finding.result == 'warning' %}bg-yellow-50{% else %}bg-red-50{% endif %}">
                <span class="font-medium">{{ finding.check }}:</span>
                <span class="{% if finding.result == 'pass' %}text-green-700{% elif finding.result == 'warning' %}text-yellow-700{% else %}text-red-700{% endif %}">
                  {{ finding.result }}
                </span>
                <div class="text-gray-600">{{ finding.message }}</div>
                {% if finding.suggestion %}
                <div class="text-gray-500 italic">Suggestion: {{ finding.suggestion }}</div>
                {% endif %}
              </div>
              {% endfor %}
            </div>
            {% else %}
            <span class="text-xs text-gray-400">No findings</span>
            {% endif %}
          </td>
          <td class="px-4 py-4 whitespace-nowrap text-sm text-gray-500">
            {{ tool.published_at[:10] if tool.published_at else 'Unknown' }}
          </td>
        </tr>
        {% endfor %}
      </tbody>
    </table>
  </div>

  <!-- Pagination -->
  {% if meta.total_pages > 1 %}
  <div class="bg-gray-50 px-6 py-3 flex items-center justify-between border-t border-gray-200">
    <div class="text-sm text-gray-700">
      Page {{ meta.page }} of {{ meta.total_pages }} ({{ meta.total }} total)
    </div>
    <div class="flex space-x-2">
      {% if meta.page > 1 %}
      <a href="?page={{ meta.page - 1 }}{% if scrutiny_filter %}&scrutiny_status={{ scrutiny_filter }}{% endif %}{% if moderation_filter %}&moderation_status={{ moderation_filter }}{% endif %}" class="px-3 py-1 border rounded text-sm hover:bg-gray-100">Previous</a>
      {% endif %}
      {% if meta.page < meta.total_pages %}
      <a href="?page={{ meta.page + 1 }}{% if scrutiny_filter %}&scrutiny_status={{ scrutiny_filter }}{% endif %}{% if moderation_filter %}&moderation_status={{ moderation_filter }}{% endif %}" class="px-3 py-1 border rounded text-sm hover:bg-gray-100">Next</a>
      {% endif %}
    </div>
  </div>
  {% endif %}
  {% else %}
  <div class="text-center py-12">
    <svg class="mx-auto h-12 w-12 text-gray-400" fill="none" stroke="currentColor" viewBox="0 0 24 24">
      <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9 5H7a2 2 0 00-2 2v12a2 2 0 002 2h10a2 2 0 002-2V7a2 2 0 00-2-2h-2M9 5a2 2 0 002 2h2a2 2 0 002-2M9 5a2 2 0 012-2h2a2 2 0 012 2"/>
    </svg>
    <h3 class="mt-2 text-sm font-medium text-gray-900">No tools found</h3>
    <p class="mt-1 text-sm text-gray-500">Try adjusting your filters.</p>
  </div>
  {% endif %}
</div>

<script>
function showFindings(toolId) {
  const el = document.getElementById(`findings-${toolId}`);
  el.classList.toggle('hidden');
}
</script>
{% endblock %}
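For reference, this new template reads `tools`, `stats_summary`, `meta`, and the two filter values from its context. A hedged sketch of what the `web.admin_scrutiny` route might look like; Flask and sqlite3 are inferred from the blueprint-style `url_for` calls, and the database path, page size, and template path are placeholders rather than values taken from the diff:

```python
import json
import math
import sqlite3
from flask import Blueprint, render_template, request

web = Blueprint("web", __name__)
PER_PAGE = 25  # placeholder page size


@web.route("/admin/scrutiny")
def admin_scrutiny():
    scrutiny_filter = request.args.get("scrutiny_status") or None
    moderation_filter = request.args.get("moderation_status") or None
    page = max(int(request.args.get("page", 1)), 1)

    conn = sqlite3.connect("registry.db")  # placeholder path
    conn.row_factory = sqlite3.Row

    # Build WHERE clause from the optional filters.
    where, params = [], []
    if scrutiny_filter:
        where.append("scrutiny_status = ?")
        params.append(scrutiny_filter)
    if moderation_filter:
        where.append("moderation_status = ?")
        params.append(moderation_filter)
    clause = f"WHERE {' AND '.join(where)}" if where else ""

    total = conn.execute(f"SELECT COUNT(*) FROM tools {clause}", params).fetchone()[0]
    rows = conn.execute(
        f"SELECT * FROM tools {clause} ORDER BY published_at DESC LIMIT ? OFFSET ?",
        [*params, PER_PAGE, (page - 1) * PER_PAGE],
    ).fetchall()

    # Decode the stored scrutiny report so the template can read .findings.
    tools = []
    for row in rows:
        tool = dict(row)
        raw = tool.get("scrutiny_report")
        tool["scrutiny_report"] = json.loads(raw) if raw else None
        tools.append(tool)

    def count(condition: str) -> int:
        return conn.execute(f"SELECT COUNT(*) FROM tools WHERE {condition}").fetchone()[0]

    # Keys mirror what the stats cards read.
    stats_summary = {
        "approved_approved": count("scrutiny_status = 'approved' AND moderation_status = 'approved'"),
        "review_pending": count("scrutiny_status = 'pending_review' AND moderation_status = 'pending'"),
        "review_approved": count("scrutiny_status = 'pending_review' AND moderation_status = 'approved'"),
        "total_pending": count("moderation_status = 'pending'"),
    }
    conn.close()

    meta = {"page": page, "total": total, "total_pages": max(math.ceil(total / PER_PAGE), 1)}
    return render_template(
        "dashboard/admin_scrutiny.html",  # placeholder template path
        tools=tools,
        stats_summary=stats_summary,
        meta=meta,
        scrutiny_filter=scrutiny_filter,
        moderation_filter=moderation_filter,
    )
```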
@ -67,6 +67,43 @@
</ul>
</div>

<!-- Tags Filter -->
{% if all_tags %}
<div>
  <button type="button"
          onclick="toggleTagsFilter()"
          class="flex items-center justify-between w-full text-sm font-medium text-gray-700 mb-3">
    <span>Tags</span>
    <svg id="tags-chevron" class="w-4 h-4 text-gray-500 transition-transform {{ '' if include_tags or exclude_tags else 'rotate-180' }}" fill="none" stroke="currentColor" viewBox="0 0 24 24">
      <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M19 9l-7 7-7-7"/>
    </svg>
  </button>
  <div id="tags-filter" class="{{ '' if include_tags or exclude_tags else 'hidden' }}">
    {% if include_tags or exclude_tags %}
    <div class="flex items-center justify-between mb-2">
      <span class="text-xs text-gray-500">Click to cycle: include/exclude/none</span>
      <a href="{{ url_for('web.tools', sort=sort, category=current_category) }}" class="text-xs text-indigo-600 hover:underline">Clear</a>
    </div>
    {% else %}
    <p class="text-xs text-gray-500 mb-2">Click to cycle: include/exclude/none</p>
    {% endif %}
    <div class="flex flex-wrap gap-2">
      {% for tag in all_tags[:20] %}
      {% set tag_state = 'include' if tag.name in include_tags else ('exclude' if tag.name in exclude_tags else 'neutral') %}
      <button type="button"
              onclick="cycleTagState('{{ tag.name }}', '{{ tag_state }}')"
              class="tag-btn px-2 py-1 text-xs rounded-full border transition-colors
                     {% if tag_state == 'include' %}bg-green-100 border-green-300 text-green-700
                     {% elif tag_state == 'exclude' %}bg-red-100 border-red-300 text-red-700 line-through
                     {% else %}bg-white border-gray-300 text-gray-600 hover:border-gray-400{% endif %}">
        {{ tag.name }}
      </button>
      {% endfor %}
    </div>
  </div>
</div>
{% endif %}

<!-- Sort -->
<div>
  <label for="sort-select" class="block text-sm font-medium text-gray-700 mb-3">Sort By</label>
@ -133,6 +170,29 @@
<option value="name" {{ 'selected' if sort == 'name' }}>Name (A-Z)</option>
</select>
</div>

<!-- Mobile Tags Filter -->
{% if all_tags %}
<div>
  <label class="block text-sm font-medium text-gray-700 mb-2">Tags</label>
  <div class="flex flex-wrap gap-2">
    {% for tag in all_tags[:12] %}
    {% set tag_state = 'include' if tag.name in include_tags else ('exclude' if tag.name in exclude_tags else 'neutral') %}
    <button type="button"
            onclick="cycleTagState('{{ tag.name }}', '{{ tag_state }}')"
            class="px-2 py-1 text-xs rounded-full border transition-colors
                   {% if tag_state == 'include' %}bg-green-100 border-green-300 text-green-700
                   {% elif tag_state == 'exclude' %}bg-red-100 border-red-300 text-red-700 line-through
                   {% else %}bg-white border-gray-300 text-gray-600{% endif %}">
      {{ tag.name }}
    </button>
    {% endfor %}
  </div>
  {% if include_tags or exclude_tags %}
  <a href="{{ url_for('web.tools', sort=sort, category=current_category) }}" class="text-xs text-indigo-600 hover:underline mt-2 inline-block">Clear tags</a>
  {% endif %}
</div>
{% endif %}
</div>
</div>
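Both tag widgets submit their state as comma-separated `include_tags` / `exclude_tags` query parameters, so the `web.tools` route presumably splits them back into lists and narrows the result set accordingly. A minimal, hedged sketch of that server side; storing tags as a per-tool list is an assumption, and a real implementation would more likely push the filtering into the SQL query:

```python
from flask import request


def parse_tag_params() -> tuple[list[str], list[str]]:
    """Split the comma-separated include_tags / exclude_tags query
    parameters back into lists, mirroring what cycleTagState() builds
    on the client."""
    include = [t for t in (request.args.get("include_tags") or "").split(",") if t]
    exclude = [t for t in (request.args.get("exclude_tags") or "").split(",") if t]
    return include, exclude


def filter_by_tags(tools: list[dict], include: list[str], exclude: list[str]) -> list[dict]:
    """Keep tools that carry every included tag and none of the excluded ones."""
    kept = []
    for tool in tools:
        tags = set(tool.get("tags") or [])
        if include and not set(include).issubset(tags):
            continue
        if exclude and set(exclude) & tags:
            continue
        kept.append(tool)
    return kept
```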
@ -160,7 +220,8 @@
description=tool.description,
category=tool.category,
downloads=tool.downloads,
version=tool.version
version=tool.version,
tags=tool.tags
) }}
{% endfor %}
</div>
@ -170,7 +231,7 @@
<nav class="mt-12 flex items-center justify-center" aria-label="Pagination">
  <div class="flex items-center gap-2">
    {% if pagination.has_prev %}
    <a href="{{ url_for('web.tools', page=pagination.prev_num, sort=sort, category=current_category) }}"
    <a href="{{ url_for('web.tools', page=pagination.prev_num, sort=sort, category=current_category, include_tags=include_tags|join(',') if include_tags else none, exclude_tags=exclude_tags|join(',') if exclude_tags else none) }}"
       class="px-4 py-2 text-sm font-medium text-gray-700 bg-white border border-gray-300 rounded-md hover:bg-gray-50">
      Previous
    </a>
@ -181,7 +242,7 @@
</span>

{% if pagination.has_next %}
<a href="{{ url_for('web.tools', page=pagination.next_num, sort=sort, category=current_category) }}"
<a href="{{ url_for('web.tools', page=pagination.next_num, sort=sort, category=current_category, include_tags=include_tags|join(',') if include_tags else none, exclude_tags=exclude_tags|join(',') if exclude_tags else none) }}"
   class="px-4 py-2 text-sm font-medium text-gray-700 bg-white border border-gray-300 rounded-md hover:bg-gray-50">
  Next
</a>
@ -228,5 +289,47 @@ function updateSort(value) {
url.searchParams.set('sort', value);
window.location.href = url.toString();
}

function toggleTagsFilter() {
  const panel = document.getElementById('tags-filter');
  const chevron = document.getElementById('tags-chevron');
  panel.classList.toggle('hidden');
  chevron.classList.toggle('rotate-180');
}

function cycleTagState(tagName, currentState) {
  const url = new URL(window.location.href);
  let includeTags = (url.searchParams.get('include_tags') || '').split(',').filter(Boolean);
  let excludeTags = (url.searchParams.get('exclude_tags') || '').split(',').filter(Boolean);

  // Remove from both lists first
  includeTags = includeTags.filter(t => t !== tagName);
  excludeTags = excludeTags.filter(t => t !== tagName);

  // Cycle: neutral -> include -> exclude -> neutral
  if (currentState === 'neutral') {
    includeTags.push(tagName);
  } else if (currentState === 'include') {
    excludeTags.push(tagName);
  }
  // else: exclude -> neutral (already removed)

  // Update URL params
  if (includeTags.length > 0) {
    url.searchParams.set('include_tags', includeTags.join(','));
  } else {
    url.searchParams.delete('include_tags');
  }
  if (excludeTags.length > 0) {
    url.searchParams.set('exclude_tags', excludeTags.join(','));
  } else {
    url.searchParams.delete('exclude_tags');
  }

  // Reset to page 1 when filters change
  url.searchParams.delete('page');

  window.location.href = url.toString();
}
</script>
{% endblock %}
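The include → exclude → neutral cycle lives entirely in `cycleTagState()` above. A small Python restatement of the same state machine, purely illustrative (for example as a reference when unit-testing the intended behaviour; the "cli" tag name is made up):

```python
def cycle_tag_state(tag: str, include: list[str], exclude: list[str]) -> tuple[list[str], list[str]]:
    """neutral -> include -> exclude -> neutral, matching cycleTagState()."""
    was_included = tag in include
    was_excluded = tag in exclude
    # Remove from both lists first, exactly like the JS version.
    include = [t for t in include if t != tag]
    exclude = [t for t in exclude if t != tag]
    if not was_included and not was_excluded:   # neutral -> include
        include.append(tag)
    elif was_included:                          # include -> exclude
        exclude.append(tag)
    # was_excluded: falls through to neutral (already removed from both)
    return include, exclude


# Cycling the same tag three times returns to the neutral state.
inc, exc = cycle_tag_state("cli", [], [])    # (['cli'], [])
inc, exc = cycle_tag_state("cli", inc, exc)  # ([], ['cli'])
inc, exc = cycle_tag_state("cli", inc, exc)  # ([], [])
```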