diff --git a/src/smarttools/web/docs_content.py b/src/smarttools/web/docs_content.py
index 9b9655a..7456bf1 100644
--- a/src/smarttools/web/docs_content.py
+++ b/src/smarttools/web/docs_content.py
@@ -607,6 +607,643 @@ project. It implements:
 ("example-project", "Full Example Project"),
 ],
 },
+
+ "yaml-config": {
+ "title": "Understanding YAML Config",
+ "description": "Learn the structure of SmartTools configuration files",
+ "content": """
+Every SmartTool is defined by a YAML configuration file. This guide covers the complete
+structure and all available options.
+
+Tool configs are stored in ~/.smarttools/<tool-name>/config.yaml.
# Required fields
+name: my-tool # Tool name (lowercase, hyphens)
+version: "1.0.0" # Semver version string
+
+# Recommended fields
+description: "What this tool does"
+category: text-processing # For registry organization
+tags: # Searchable tags
+ - text
+ - formatting
+
+# Optional metadata
+author: your-name
+license: MIT
+homepage: https://github.com/you/my-tool
+
+# Arguments (custom CLI flags)
+arguments:
+ - flag: --format
+ variable: format
+ default: "markdown"
+ description: Output format
+
+# Processing steps
+steps:
+ - type: prompt
+ provider: claude
+ prompt: |
+ Process this: {input}
+ output_var: result
+
+# Final output template
+output: "{result}"
+
+The tool's identifier. Must be lowercase with hyphens only:
+name: my-cool-tool # Good
+name: MyCoolTool # Bad - no uppercase
+name: my_cool_tool # Bad - no underscores
+
+Semantic version string. Always quote it to prevent YAML parsing issues:
+version: "1.0.0" # Good
+version: 1.0 # Bad - YAML parses as float
+
+Use {variable} syntax in prompts and output:
+{input} - Content piped to the tool
+{variable_name} - From arguments or previous steps
+
+To include literal braces, double them:
+prompt: |
+ Format as JSON: {{\"key\": \"value\"}}
+ Input: {input}
+
+Standard categories for the registry:
+text-processing - Summarize, translate, format
+code-analysis - Review, explain, generate
+data-extraction - Parse, extract, convert
+content-creation - Write, expand, draft
+productivity - Automate, organize
+education - Explain, teach, simplify
+
+Test your config without running:
+# Validate syntax
+smarttools test my-tool --dry-run
+
+# Check for common issues
+smarttools registry publish --dry-run
+""",
+ "headings": [
+ ("file-location", "File Location"),
+ ("complete-structure", "Complete Structure"),
+ ("required-fields", "Required Fields"),
+ ("variable-substitution", "Variable Substitution"),
+ ("categories", "Categories"),
+ ("validation", "Validation"),
+ ],
+ },
+
+ "arguments": {
+ "title": "Custom Arguments",
+ "description": "Add flags and options to make your tools flexible",
+ "content": """
+Arguments let users customize tool behavior with CLI flags like --format json
+or --verbose.
arguments:
+ - flag: --format # The CLI flag
+ variable: format # Variable name in templates
+ default: "text" # Default value if not provided
+ description: "Output format (text, json, markdown)"
+
+Reference arguments in prompts using {variable_name}:
arguments:
+ - flag: --tone
+ variable: tone
+ default: "professional"
+
+steps:
+ - type: prompt
+ provider: claude
+ prompt: |
+ Rewrite this text with a {tone} tone:
+
+ {input}
+ output_var: result
+
+Users can then run:
+echo "Hey, fix this bug ASAP!" | tone-shift --tone friendly
+
+arguments:
+ - flag: --lang
+ variable: language
+ default: "English"
+ description: "Target language"
+
+ - flag: --formality
+ variable: formality
+ default: "neutral"
+ description: "Formality level (casual, neutral, formal)"
+
+ - flag: --max-length
+ variable: max_length
+ default: "500"
+ description: "Maximum output length in words"
+
+Document valid choices in the description:
+- flag: --style
+ variable: style
+ default: "concise"
+ description: "Writing style: concise, detailed, or academic"
+
+Always quote defaults to avoid YAML issues:
+- flag: --max-tokens
+ variable: max_tokens
+ default: "1000" # Quoted string, not integer
+
+Use string values for conditional prompts:
+- flag: --verbose
+ variable: verbose
+ default: "no"
+ description: "Include detailed explanations (yes/no)"
+
+Combine multiple arguments in your prompt template:
+steps:
+ - type: prompt
+ provider: claude
+ prompt: |
+ Translate the following text to {language}.
+ Use a {formality} register.
+ Keep the response under {max_length} words.
+
+ Text to translate:
+ {input}
+ output_var: translation
+
+target_language not tl
+--lang not --target-language
+
+Complex tools can chain multiple steps together. Each step's output becomes available
+to subsequent steps.
+
+steps:
+ # Step 1: Extract key points
+ - type: prompt
+ provider: claude
+ prompt: "Extract 5 key points from: {input}"
+ output_var: key_points
+
+ # Step 2: Use step 1's output
+ - type: prompt
+ provider: claude
+ prompt: |
+ Create a summary from these points:
+ {key_points}
+ output_var: summary
+
+output: "{summary}"
+
+Variables flow through the pipeline:
+{input} → available in all steps
+{key_points} → available after step 1
+{summary} → available after step 2
+
+Combine AI calls with Python processing:
+steps:
+ # Step 1: AI extracts data
+ - type: prompt
+ provider: claude
+ prompt: |
+ Extract all email addresses from this text as a comma-separated list:
+ {input}
+ output_var: emails_raw
+
+ # Step 2: Python cleans the data
+ - type: code
+ code: |
+ emails = [e.strip() for e in emails_raw.split(',')]
+ emails = [e for e in emails if '@' in e]
+ email_count = len(emails)
+ cleaned_emails = '\\n'.join(sorted(set(emails)))
+ output_var: cleaned_emails, email_count
+
+ # Step 3: AI formats output
+ - type: prompt
+ provider: claude
+ prompt: |
+ Format these {email_count} emails as a nice list:
+ {cleaned_emails}
+ output_var: formatted
+
+output: "{formatted}"
+
+If any step fails, execution stops. Design steps to handle edge cases:
+steps:
+ - type: code
+ code: |
+ # Handle empty input gracefully
+ if not input.strip():
+ result = "No input provided"
+ skip_ai = "yes"
+ else:
+ result = input
+ skip_ai = "no"
+ output_var: result, skip_ai
+
+ - type: prompt
+ provider: claude
+ prompt: |
+ {result}
+ # AI prompt only runs if skip_ai is "no"
+ output_var: ai_response
+
+steps:
+ - type: prompt # Extract structured data
+ - type: code # Transform/filter
+ - type: prompt # Format for output
+
+steps:
+ - type: prompt # Break down into parts
+ - type: prompt # Combine insights
+
+steps:
+ - type: code # Validate input format
+ - type: prompt # Process if valid
+
+# Show prompts without running
+cat test.txt | my-tool --dry-run
+
+# See verbose output
+cat test.txt | my-tool --verbose
+""",
+ "headings": [
+ ("step-flow", "How Steps Flow"),
+ ("mixed-steps", "Mixing Prompt and Code Steps"),
+ ("error-handling", "Step Dependencies"),
+ ("common-patterns", "Common Patterns"),
+ ("debugging", "Debugging Multi-Step Tools"),
+ ],
+ },
+
+ "code-steps": {
+ "title": "Code Steps",
+ "description": "Add Python code processing between AI calls",
+ "content": """
+Code steps let you run Python code to process data, validate input, or transform
+AI outputs between prompts.
+
+steps:
+ - type: code
+ code: |
+ # Python code here
+ result = input.upper()
+ output_var: result
+
+Code steps have access to:
+input - The original input text
+
+arguments:
+ - flag: --max
+ variable: max_items
+ default: "10"
+
+steps:
+ - type: prompt
+ prompt: "List items from: {input}"
+ output_var: items_raw
+
+ - type: code
+ code: |
+ # Access argument and previous step output
+ items = items_raw.strip().split('\\n')
+ limited = items[:int(max_items)]
+ result = '\\n'.join(limited)
+ output_var: result
+
+Return multiple values with comma-separated output_var:
+- type: code
+ code: |
+ lines = input.strip().split('\\n')
+ line_count = len(lines)
+ word_count = len(input.split())
+ char_count = len(input)
+ output_var: line_count, word_count, char_count
+
+- type: code
+ code: |
+ # Remove empty lines
+ lines = [l for l in input.split('\\n') if l.strip()]
+ cleaned = '\\n'.join(lines)
+ output_var: cleaned
+
+- type: code
+ code: |
+ import json
+ data = json.loads(ai_response)
+ formatted = json.dumps(data, indent=2)
+ output_var: formatted
+
+- type: code
+ code: |
+ import re
+ emails = re.findall(r'[\\w.-]+@[\\w.-]+', input)
+ valid_emails = '\\n'.join(emails) if emails else "No emails found"
+ output_var: valid_emails
+
+- type: code
+ code: |
+ from pathlib import Path
+ # Write to temp file
+ output_path = Path('/tmp/output.txt')
+ output_path.write_text(processed_text)
+ result = f"Saved to {output_path}"
+ output_var: result
+
+Standard library imports work in code steps:
+- type: code
+ code: |
+ import json
+ import re
+ from datetime import datetime
+ from pathlib import Path
+
+ timestamp = datetime.now().isoformat()
+ result = f"Processed at {timestamp}"
+ output_var: result
+
+Handle exceptions to prevent tool failures:
+- type: code
+ code: |
+ import json
+ try:
+ data = json.loads(ai_response)
+ result = data.get('summary', 'No summary found')
+ except json.JSONDecodeError:
+ result = ai_response # Fall back to raw response
+ output_var: result
+
+Never use eval() on untrusted input
+
+Take your tools to the next level with advanced patterns like multi-provider
+workflows, dynamic prompts, and complex data pipelines.
+
+Use different AI providers for different tasks:
+steps:
+ # Fast model for extraction
+ - type: prompt
+ provider: opencode-grok
+ prompt: "Extract key facts from: {input}"
+ output_var: facts
+
+ # Powerful model for synthesis
+ - type: prompt
+ provider: claude-opus
+ prompt: |
+ Create a comprehensive analysis from these facts:
+ {facts}
+ output_var: analysis
+
+Use code steps to implement branching:
+steps:
+ # Analyze input type
+ - type: code
+ code: |
+ if input.strip().startswith('{'):
+ input_type = "json"
+ processed = input
+ elif ',' in input and '\\n' in input:
+ input_type = "csv"
+ processed = input
+ else:
+ input_type = "text"
+ processed = input
+ output_var: input_type, processed
+
+ # Different prompt based on type
+ - type: prompt
+ provider: claude
+ prompt: |
+ This is {input_type} data. Analyze it appropriately:
+ {processed}
+ output_var: result
+
+Multiple passes for quality improvement:
+steps:
+ # First draft
+ - type: prompt
+ provider: opencode-deepseek
+ prompt: "Write a summary of: {input}"
+ output_var: draft
+
+ # Critique
+ - type: prompt
+ provider: claude-haiku
+ prompt: |
+ Review this summary for accuracy and clarity.
+ List specific improvements needed:
+ {draft}
+ output_var: critique
+
+ # Final version
+ - type: prompt
+ provider: claude-sonnet
+ prompt: |
+ Improve this summary based on the feedback:
+
+ Original: {draft}
+
+ Feedback: {critique}
+ output_var: final
+
+name: csv-analyzer
+steps:
+ # Parse CSV
+ - type: code
+ code: |
+ import csv
+ from io import StringIO
+ reader = csv.DictReader(StringIO(input))
+ rows = list(reader)
+ headers = list(rows[0].keys()) if rows else []
+ row_count = len(rows)
+ sample = rows[:5]
+ output_var: headers, row_count, sample
+
+ # AI analysis
+ - type: prompt
+ provider: claude
+ prompt: |
+ Analyze this CSV data:
+ - Columns: {headers}
+ - Row count: {row_count}
+ - Sample rows: {sample}
+
+ Provide insights about the data structure and patterns.
+ output_var: analysis
+
+ # Generate code
+ - type: prompt
+ provider: claude
+ prompt: |
+ Based on this analysis: {analysis}
+
+ Write Python code to process this CSV and extract key metrics.
+ output_var: code
+
+Build prompts dynamically:
+arguments:
+ - flag: --task
+ variable: task
+ default: "summarize"
+
+steps:
+ - type: code
+ code: |
+ templates = {
+ "summarize": "Summarize this concisely:",
+ "explain": "Explain this for a beginner:",
+ "critique": "Provide constructive criticism of:",
+ "expand": "Expand on this with more detail:"
+ }
+ instruction = templates.get(task, templates["summarize"])
+ output_var: instruction
+
+ - type: prompt
+ provider: claude
+ prompt: |
+ {instruction}
+
+ {input}
+ output_var: result
+
+steps:
+ # Use code to call external commands
+ - type: code
+ code: |
+ import subprocess
+ # Run linter
+ result = subprocess.run(
+ ['pylint', '--output-format=json', '-'],
+ input=input,
+ capture_output=True,
+ text=True
+ )
+ lint_output = result.stdout
+ output_var: lint_output
+
+ # AI interprets results
+ - type: prompt
+ provider: claude
+ prompt: |
+ Explain these linting results in plain English
+ and suggest fixes:
+
+ {lint_output}
+ output_var: explanation
+
+