# LooseCannon Configuration
# Sourced as shell (.env-style KEY=VALUE pairs) — no spaces around '=',
# values left unquoted since none contain whitespace or glob characters.

# Server port (default: 8765)
PORT=8765

# Ollama API URL (default: http://localhost:11434)
OLLAMA_URL=http://localhost:11434

# Ollama model to use (default: llama2)
# You can use any model available in Ollama: llama2, mistral, codellama, etc.
# Run 'ollama list' to see available models
OLLAMA_MODEL=llama2

# Debug mode (shows extra logging)
DEBUG=false