# Parrot Configuration Example
# Copy this file to one of the following locations:
# - ~/.config/parrot/parrot.toml (user-specific)
# - ~/.parrot.toml (user-specific, legacy)
# - /etc/parrot/parrot.toml (system-wide)
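
# For example, to install a user-specific copy (assuming this file is saved
# as parrot.toml in the current directory):
#   mkdir -p ~/.config/parrot
#   cp parrot.toml ~/.config/parrot/parrot.toml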

# ==================== BACKEND CONFIGURATION ====================

# Backend priority: an ordered list drawn from "api", "local", "fallback".
# Parrot tries the backends in this order until one succeeds.
backend_priority = ["local", "api", "fallback"]
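
# For example, to prefer the hosted API and keep the local model as a backup:
#   backend_priority = ["api", "local", "fallback"]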

# Default backend to use (bypasses backend_priority and selects this backend directly)
# default_backend = "local"

# ==================== PERSONALITY SETTINGS ====================
# These are top-level keys, so they must appear before the first [table] header.

# Personality level: "mild", "sarcastic", "savage"
personality = "sarcastic"

# Enable colored output
colors = true

# Color scheme per personality (auto-detected if not specified)
# If you uncomment this table, remove the colors = true line above:
# TOML does not allow colors to be both a boolean and a table.
# [colors]
# mild = "blue"
# sarcastic = "yellow"
# savage = "red"

# ==================== API BACKEND SETTINGS ====================

[api]
# OpenAI-compatible API endpoint
base_url = "https://api.openai.com/v1"

# API key for authentication
# Can also be set via the PARROT_API_KEY environment variable
# api_key = "your-api-key-here"
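
# For example, to keep the key out of this file, export it from your shell
# profile instead (variable name as documented above):
#   export PARROT_API_KEY="your-api-key-here"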

# Model to use for generation
model = "gpt-4o-mini"

# Maximum number of tokens in a response
max_tokens = 150

# Sampling temperature controlling response creativity (0.0 = deterministic, 1.0 = creative)
temperature = 0.7
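
# For example, a lower value gives more consistent, repeatable replies:
#   temperature = 0.2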

# Request timeout in seconds
timeout = 30

# ==================== LOCAL BACKEND SETTINGS ====================

[local]
# Ollama server endpoint
# Note: use 127.0.0.1 instead of localhost to avoid IPv6 resolution issues
base_url = "http://127.0.0.1:11434"

# Model to use (pulled automatically if not present)
# llama3.2:3b loads quickly and gives good quality for CLI responses
model = "llama3.2:3b"

# Generation options
max_tokens = 150
temperature = 0.7
timeout = 45 # long enough for local generation, short enough to fail over to the next backend

# Model management
auto_pull = true # Automatically pull the model if it is not available
pull_timeout = 300 # Timeout for model pulling, in seconds
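
# To download the model ahead of time instead of relying on auto_pull, use
# the standard Ollama CLI:
#   ollama pull llama3.2:3b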

# Performance tip: parrot install sets OLLAMA_KEEP_ALIVE=1h,
# which keeps models loaded in memory for faster responses.
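
# To apply the same setting by hand for an Ollama server you start yourself:
#   export OLLAMA_KEEP_ALIVE=1h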

# ==================== SHELL INTEGRATION ====================

[shell]
# Enable shell integration hooks
enabled = true

# Shells to integrate with (detected automatically)
# supported_shells = ["bash", "zsh"]

# Hook installation paths (auto-detected)
# bash_profile = "~/.bashrc"
# zsh_profile = "~/.zshrc"

# ==================== ADVANCED SETTINGS ====================

[advanced]
# Enable debug logging
debug = false

# Log file location (empty = no file logging)
# log_file = "~/.config/parrot/parrot.log"

# Cache settings
cache_enabled = true
cache_duration = 3600 # seconds (one hour)

# Retry settings
max_retries = 3
retry_delay = 1 # seconds

# ==================== FEATURE FLAGS ====================

[features]
# Enable experimental features
experimental = false

# Enable telemetry (anonymous usage statistics)
telemetry = false

# Enable automatic update checks
auto_update_check = true