diff --git a/src/smarttools/cli.py b/src/smarttools/cli.py
index c0a93a9..e43e5bc 100644
--- a/src/smarttools/cli.py
+++ b/src/smarttools/cli.py
@@ -342,24 +342,24 @@ PROVIDER_INSTALL_INFO = {
     "claude": {
         "group": "Anthropic Claude",
         "install_cmd": "npm install -g @anthropic-ai/claude-code",
-        "requires": "Node.js and npm",
-        "setup": "Run 'claude' and follow login prompts",
-        "cost": "Pay-per-use (API key required)",
+        "requires": "Node.js 18+ and npm",
+        "setup": "Run 'claude' - opens browser for sign-in (auto-saves auth tokens)",
+        "cost": "Pay-per-use (billed to your Anthropic account)",
         "variants": ["claude", "claude-haiku", "claude-opus", "claude-sonnet"],
     },
     "codex": {
         "group": "OpenAI Codex",
         "install_cmd": "pip install openai-codex",
         "requires": "Python 3.8+",
-        "setup": "Set OPENAI_API_KEY environment variable",
-        "cost": "Pay-per-use (API key required)",
+        "setup": "Run 'codex' - opens browser for sign-in (auto-saves auth tokens)",
+        "cost": "Pay-per-use (billed to your OpenAI account)",
         "variants": ["codex"],
     },
     "gemini": {
         "group": "Google Gemini",
         "install_cmd": "pip install google-generativeai",
         "requires": "Python 3.8+",
-        "setup": "Set GOOGLE_API_KEY or run 'gemini auth'",
+        "setup": "Run 'gemini auth' - opens browser for Google sign-in",
         "cost": "Free tier available, pay-per-use for more",
         "variants": ["gemini", "gemini-flash"],
     },
@@ -367,18 +367,19 @@ PROVIDER_INSTALL_INFO = {
         "group": "OpenCode",
         "install_cmd": "curl -fsSL https://opencode.ai/install.sh | bash",
         "requires": "curl, bash",
-        "setup": "Run 'opencode auth' to authenticate",
-        "cost": "Free tier (pickle), paid for other models",
+        "setup": "Run 'opencode auth' - opens browser for sign-in",
+        "cost": "Free tier (pickle model), paid for premium models",
         "variants": ["opencode-deepseek", "opencode-pickle", "opencode-nano", "opencode-reasoner", "opencode-grok"],
     },
     "ollama": {
-        "group": "Ollama (Local)",
+        "group": "Ollama (Local LLMs)",
         "install_cmd": "curl -fsSL https://ollama.ai/install.sh | bash",
-        "requires": "curl, bash, decent GPU recommended",
-        "setup": "Run 'ollama pull llama3' to download a model",
-        "cost": "FREE (runs locally)",
+        "requires": "curl, bash, 8GB+ RAM (GPU recommended)",
+        "setup": "Run 'ollama pull llama3' to download a model, then add provider",
+        "cost": "FREE (runs entirely on your machine)",
         "variants": [],
         "custom": True,
+        "post_install_note": "After installing, add the provider:\n  smarttools providers add ollama 'ollama run llama3' -d 'Local Llama 3'",
     },
 }
 
@@ -464,7 +465,11 @@ def cmd_providers(args):
         print()
         print(f"Next steps:")
         print(f"  1. {info['setup']}")
-        print(f"  2. Test with: smarttools providers test {info['variants'][0] if info['variants'] else selected}")
+        if info.get('post_install_note'):
+            print(f"  2. {info['post_install_note']}")
+            print(f"  3. Test with: smarttools providers test {selected}")
+        else:
+            print(f"  2. Test with: smarttools providers test {info['variants'][0] if info['variants'] else selected}")
     else:
         print()
         print(f"Installation failed (exit code {result.returncode})")