tenseleyflow/parrot / 1e293f3

gutsy

Authored by espadonne
SHA: 1e293f385d5ff8c05f165086abbf236ac7b7daf7
Parents: b12e075
Tree: 72554fa

5 changed files

Status  File                           Additions  Deletions
A       internal/config/config.go      172        0
A       internal/llm/api.go            163        0
A       internal/llm/manager.go        227        0
A       internal/llm/ollama.go         109        0
A       internal/prompts/templates.go  97         0
internal/config/config.go (added)
@@ -0,0 +1,172 @@
+package config
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/BurntSushi/toml"
+)
+
+type Config struct {
+	// API Configuration (Primary backend)
+	API APIConfig `toml:"api"`
+
+	// Local LLM Configuration (Secondary backend)
+	Local LocalConfig `toml:"local"`
+
+	// General Settings
+	General GeneralConfig `toml:"general"`
+}
+
+type APIConfig struct {
+	Enabled  bool   `toml:"enabled"`
+	Provider string `toml:"provider"` // "openai", "anthropic", "custom"
+	Endpoint string `toml:"endpoint"` // Custom endpoint URL
+	APIKey   string `toml:"api_key"`  // API key
+	Model    string `toml:"model"`    // Model name
+	Timeout  int    `toml:"timeout"`  // Request timeout in seconds
+}
+
+type LocalConfig struct {
+	Enabled  bool   `toml:"enabled"`
+	Provider string `toml:"provider"` // "ollama"
+	Endpoint string `toml:"endpoint"` // Ollama endpoint
+	Model    string `toml:"model"`    // Model name
+	Timeout  int    `toml:"timeout"`  // Request timeout in seconds
+}
+
+type GeneralConfig struct {
+	Personality  string `toml:"personality"`   // "savage", "sarcastic", "mild"
+	FallbackMode bool   `toml:"fallback_mode"` // Use hardcoded responses only
+	Debug        bool   `toml:"debug"`         // Debug logging
+}
+
+// DefaultConfig returns the built-in default configuration.
+func DefaultConfig() *Config {
+	return &Config{
+		API: APIConfig{
+			Enabled:  true,
+			Provider: "openai",
+			Endpoint: "https://api.openai.com/v1",
+			APIKey:   "", // Must be set by user
+			Model:    "gpt-3.5-turbo",
+			Timeout:  10,
+		},
+		Local: LocalConfig{
+			Enabled:  true,
+			Provider: "ollama",
+			Endpoint: "http://localhost:11434",
+			Model:    "phi3.5:3.8b",
+			Timeout:  30,
+		},
+		General: GeneralConfig{
+			Personality:  "savage",
+			FallbackMode: false,
+			Debug:        false,
+		},
+	}
+}
+
+// GetConfigPaths returns config file paths in order of preference.
+func GetConfigPaths() []string {
+	var paths []string
+
+	// 1. System-wide config (for RPM installs)
+	paths = append(paths, "/etc/parrot/config.toml")
+
+	// 2. User config directory
+	if configDir, err := os.UserConfigDir(); err == nil {
+		paths = append(paths, filepath.Join(configDir, "parrot", "config.toml"))
+	}
+
+	// 3. Home directory
+	if homeDir, err := os.UserHomeDir(); err == nil {
+		paths = append(paths, filepath.Join(homeDir, ".parrot.toml"))
+	}
+
+	// 4. Current directory (for development)
+	paths = append(paths, "./parrot.toml")
+
+	return paths
+}
+
+// LoadConfig loads configuration from the first available config file,
+// then applies environment variable overrides.
+func LoadConfig() (*Config, error) {
+	config := DefaultConfig()
+
+	// Try to load from config files
+	for _, path := range GetConfigPaths() {
+		if _, err := os.Stat(path); err == nil {
+			if err := loadFromFile(config, path); err != nil {
+				return nil, fmt.Errorf("error loading config from %s: %w", path, err)
+			}
+			break
+		}
+	}
+
+	// Override with environment variables
+	loadFromEnv(config)
+
+	return config, nil
+}
+
+func loadFromFile(config *Config, path string) error {
+	_, err := toml.DecodeFile(path, config)
+	return err
+}
+
+func loadFromEnv(config *Config) {
+	// API configuration from environment
+	if key := os.Getenv("PARROT_API_KEY"); key != "" {
+		config.API.APIKey = key
+	}
+	if endpoint := os.Getenv("PARROT_API_ENDPOINT"); endpoint != "" {
+		config.API.Endpoint = endpoint
+	}
+	if model := os.Getenv("PARROT_API_MODEL"); model != "" {
+		config.API.Model = model
+	}
+
+	// Local configuration from environment
+	if endpoint := os.Getenv("PARROT_OLLAMA_ENDPOINT"); endpoint != "" {
+		config.Local.Endpoint = endpoint
+	}
+	if model := os.Getenv("PARROT_OLLAMA_MODEL"); model != "" {
+		config.Local.Model = model
+	}
+
+	// General configuration
+	if personality := os.Getenv("PARROT_PERSONALITY"); personality != "" {
+		config.General.Personality = personality
+	}
+	if os.Getenv("PARROT_FALLBACK_ONLY") == "true" {
+		config.General.FallbackMode = true
+	}
+	if os.Getenv("PARROT_DEBUG") == "true" {
+		config.General.Debug = true
+	}
+}
+
+// CreateSampleConfig writes a sample config file to the given path.
+func CreateSampleConfig(path string) error {
+	config := DefaultConfig()
+
+	// Ensure directory exists
+	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+		return fmt.Errorf("failed to create config directory: %w", err)
+	}
+
+	file, err := os.Create(path)
+	if err != nil {
+		return fmt.Errorf("failed to create config file: %w", err)
+	}
+	defer file.Close()
+
+	encoder := toml.NewEncoder(file)
+	if err := encoder.Encode(config); err != nil {
+		return fmt.Errorf("failed to encode config: %w", err)
+	}
+
+	return nil
+}
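
For reference, a hand-written config that reproduces these defaults would look like the sketch below. The key names come from the toml struct tags above, and the file can live at any path returned by GetConfigPaths; this is an illustrative sample, not a file from this commit.

    [api]
    enabled = true
    provider = "openai"            # "openai", "anthropic", "custom"
    endpoint = "https://api.openai.com/v1"
    api_key = ""                   # set here or via PARROT_API_KEY
    model = "gpt-3.5-turbo"
    timeout = 10                   # seconds

    [local]
    enabled = true
    provider = "ollama"
    endpoint = "http://localhost:11434"
    model = "phi3.5:3.8b"
    timeout = 30

    [general]
    personality = "savage"         # "savage", "sarcastic", "mild"
    fallback_mode = false          # true = hardcoded responses only
    debug = false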
internal/llm/api.go (added)
@@ -0,0 +1,163 @@
+package llm
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"time"
+)
+
+type APIClient struct {
+	Endpoint string
+	APIKey   string
+	Model    string
+	client   *http.Client
+}
+
+type ChatMessage struct {
+	Role    string `json:"role"`
+	Content string `json:"content"`
+}
+
+type ChatRequest struct {
+	Model       string        `json:"model"`
+	Messages    []ChatMessage `json:"messages"`
+	MaxTokens   int           `json:"max_tokens,omitempty"`
+	Temperature float64       `json:"temperature,omitempty"`
+}
+
+type ChatChoice struct {
+	Message      ChatMessage `json:"message"`
+	FinishReason string      `json:"finish_reason"`
+}
+
+type ChatResponse struct {
+	Choices []ChatChoice `json:"choices"`
+	Error   *APIError    `json:"error,omitempty"`
+}
+
+type APIError struct {
+	Message string `json:"message"`
+	Type    string `json:"type"`
+	Code    string `json:"code"`
+}
+
+func NewAPIClient(endpoint, apiKey, model string, timeout int) *APIClient {
+	return &APIClient{
+		Endpoint: endpoint,
+		APIKey:   apiKey,
+		Model:    model,
+		client: &http.Client{
+			Timeout: time.Duration(timeout) * time.Second,
+		},
+	}
+}
+
+func (c *APIClient) Generate(ctx context.Context, prompt string) (string, error) {
+	if c.APIKey == "" {
+		return "", fmt.Errorf("API key not configured")
+	}
+
+	// Build chat request
+	req := ChatRequest{
+		Model: c.Model,
+		Messages: []ChatMessage{
+			{
+				Role:    "user",
+				Content: prompt,
+			},
+		},
+		MaxTokens:   150, // Keep responses concise
+		Temperature: 0.8, // Creative but focused
+	}
+
+	reqBody, err := json.Marshal(req)
+	if err != nil {
+		return "", fmt.Errorf("failed to marshal request: %w", err)
+	}
+
+	// Create HTTP request
+	endpoint := c.Endpoint + "/chat/completions"
+	httpReq, err := http.NewRequestWithContext(ctx, "POST", endpoint, bytes.NewReader(reqBody))
+	if err != nil {
+		return "", fmt.Errorf("failed to create request: %w", err)
+	}
+
+	// Set headers
+	httpReq.Header.Set("Content-Type", "application/json")
+	httpReq.Header.Set("Authorization", "Bearer "+c.APIKey)
+
+	// Send request
+	resp, err := c.client.Do(httpReq)
+	if err != nil {
+		return "", fmt.Errorf("failed to send request: %w", err)
+	}
+	defer resp.Body.Close()
+
+	// Parse response
+	var chatResp ChatResponse
+	if err := json.NewDecoder(resp.Body).Decode(&chatResp); err != nil {
+		return "", fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	// Check for API errors
+	if chatResp.Error != nil {
+		return "", fmt.Errorf("API error: %s", chatResp.Error.Message)
+	}
+
+	// Extract response
+	if len(chatResp.Choices) == 0 {
+		return "", fmt.Errorf("no response choices returned")
+	}
+
+	response := chatResp.Choices[0].Message.Content
+	if response == "" {
+		return "", fmt.Errorf("empty response from API")
+	}
+
+	return response, nil
+}
+
+// IsAvailable probes the endpoint with a minimal one-token request and
+// reports whether it answered with a 2xx status. Note this sends a real
+// (tiny) completion request, not just a connectivity check.
+func (c *APIClient) IsAvailable() bool {
+	if c.APIKey == "" {
+		return false
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	// Create a minimal test request
+	req := ChatRequest{
+		Model: c.Model,
+		Messages: []ChatMessage{
+			{Role: "user", Content: "test"},
+		},
+		MaxTokens: 1,
+	}
+
+	reqBody, err := json.Marshal(req)
+	if err != nil {
+		return false
+	}
+
+	endpoint := c.Endpoint + "/chat/completions"
+	httpReq, err := http.NewRequestWithContext(ctx, "POST", endpoint, bytes.NewReader(reqBody))
+	if err != nil {
+		return false
+	}
+
+	httpReq.Header.Set("Content-Type", "application/json")
+	httpReq.Header.Set("Authorization", "Bearer "+c.APIKey)
+
+	resp, err := c.client.Do(httpReq)
+	if err != nil {
+		return false
+	}
+	defer resp.Body.Close()
+
+	// Consider 2xx status codes as available
+	return resp.StatusCode >= 200 && resp.StatusCode < 300
+}
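
A minimal usage sketch of this client, assuming a hypothetical main package inside the parrot module; the key comes from the environment and the timeout matches the DefaultConfig value of 10 seconds:

    package main

    import (
        "context"
        "fmt"
        "os"
        "time"

        "parrot/internal/llm"
    )

    func main() {
        // Hypothetical wiring; in the real flow these values come from config.LoadConfig()
        client := llm.NewAPIClient("https://api.openai.com/v1", os.Getenv("PARROT_API_KEY"), "gpt-3.5-turbo", 10)

        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()

        reply, err := client.Generate(ctx, "Mock this failed command: git push (exit code 1)")
        if err != nil {
            fmt.Fprintln(os.Stderr, "api backend failed:", err)
            return
        }
        fmt.Println(reply)
    }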
internal/llm/manager.go (added)
@@ -0,0 +1,227 @@
+package llm
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	"parrot/internal/config"
+)
+
+type LLMManager struct {
+	config       *config.Config
+	apiClient    *APIClient
+	ollamaClient *OllamaClient
+}
+
+type Backend string
+
+const (
+	BackendAPI      Backend = "api"
+	BackendLocal    Backend = "local"
+	BackendFallback Backend = "fallback"
+)
+
+func NewLLMManager(cfg *config.Config) *LLMManager {
+	manager := &LLMManager{
+		config: cfg,
+	}
+
+	// Initialize API client if enabled
+	if cfg.API.Enabled && cfg.API.APIKey != "" {
+		manager.apiClient = NewAPIClient(
+			cfg.API.Endpoint,
+			cfg.API.APIKey,
+			cfg.API.Model,
+			cfg.API.Timeout,
+		)
+	}
+
+	// Initialize Ollama client if enabled
+	if cfg.Local.Enabled {
+		manager.ollamaClient = NewOllamaClient(
+			cfg.Local.Endpoint,
+			cfg.Local.Model,
+		)
+	}
+
+	return manager
+}
+
+// Generate tries each backend in priority order (API -> Local -> Fallback)
+// and returns the response together with the backend that produced it.
+func (m *LLMManager) Generate(ctx context.Context, prompt string, commandType string) (string, Backend) {
+	// If fallback mode is enabled, skip LLM backends entirely
+	if m.config.General.FallbackMode {
+		return m.generateFallback(commandType), BackendFallback
+	}
+
+	// 1. Try the API first (if configured)
+	if m.apiClient != nil && m.config.API.Enabled {
+		if m.config.General.Debug {
+			fmt.Printf("🔍 Trying API backend...\n")
+		}
+
+		response, err := m.apiClient.Generate(ctx, prompt)
+		if err == nil && response != "" {
+			response = m.cleanResponse(response)
+			if m.config.General.Debug {
+				fmt.Printf("✅ API backend succeeded\n")
+			}
+			return response, BackendAPI
+		}
+
+		if m.config.General.Debug {
+			fmt.Printf("❌ API backend failed: %v\n", err)
+		}
+	}
+
+	// 2. Try local Ollama (if configured)
+	if m.ollamaClient != nil && m.config.Local.Enabled {
+		if m.config.General.Debug {
+			fmt.Printf("🔍 Trying local backend...\n")
+		}
+
+		// Create a timeout context for local calls
+		localCtx, cancel := context.WithTimeout(ctx, time.Duration(m.config.Local.Timeout)*time.Second)
+		defer cancel()
+
+		response, err := m.ollamaClient.Generate(localCtx, prompt)
+		if err == nil && response != "" {
+			response = m.cleanResponse(response)
+			if m.config.General.Debug {
+				fmt.Printf("✅ Local backend succeeded\n")
+			}
+			return response, BackendLocal
+		}
+
+		if m.config.General.Debug {
+			fmt.Printf("❌ Local backend failed: %v\n", err)
+		}
+	}
+
+	// 3. Fall back to hardcoded responses
+	if m.config.General.Debug {
+		fmt.Printf("🔄 Using fallback backend\n")
+	}
+	return m.generateFallback(commandType), BackendFallback
+}
+
+func (m *LLMManager) cleanResponse(response string) string {
+	// Clean up the response
+	response = strings.TrimSpace(response)
+
+	// Remove common prefixes that LLMs prepend
+	prefixes := []string{
+		"Response:",
+		"Parrot says:",
+		"🦜",
+	}
+
+	for _, prefix := range prefixes {
+		if strings.HasPrefix(response, prefix) {
+			response = strings.TrimSpace(response[len(prefix):])
+		}
+	}
+
+	// Remove quotes if the entire response is quoted
+	if len(response) >= 2 && response[0] == '"' && response[len(response)-1] == '"' {
+		response = response[1 : len(response)-1]
+	}
+
+	// Ensure the response isn't too long (keep it snappy)
+	if len(response) > 150 {
+		// Try to cut at a sentence boundary
+		if idx := strings.LastIndex(response[:150], "."); idx > 50 {
+			response = response[:idx+1]
+		} else {
+			response = response[:147] + "..."
+		}
+	}
+
+	return strings.TrimSpace(response)
+}
+
+func (m *LLMManager) generateFallback(commandType string) string {
+	fallbacks := map[string][]string{
+		"git": {
+			"Git good? More like git rekt!",
+			"Did you forget to pull again? Classic amateur move.",
+			"Another git genius strikes again!",
+			"Your commits are as broken as your workflow.",
+		},
+		"nodejs": {
+			"NPM install failed? Shocking! Nobody saw that coming.",
+			"Your package.json is crying. Fix it.",
+			"Node modules: where dependencies go to die.",
+			"Even npm doesn't want to deal with your code.",
+		},
+		"docker": {
+			"Docker container more like docker DISASTER!",
+			"Even containers can't contain your incompetence.",
+			"Your Dockerfile needs therapy.",
+			"Container exit code: user error detected.",
+		},
+		"http": {
+			"404: Competence not found.",
+			"Even the internet doesn't want to talk to you.",
+			"Connection refused? So is your logic.",
+			"HTTP status: 500 Internal User Error.",
+		},
+		"generic": {
+			"Wow, you managed to break something simple. Impressive!",
+			"Maybe try reading the manual... oh wait, who am I kidding?",
+			"Error code says it all: user error!",
+			"Have you tried turning your brain on and off again?",
+		},
+	}
+
+	responses, exists := fallbacks[commandType]
+	if !exists {
+		responses = fallbacks["generic"]
+	}
+
+	// Deterministic selection: hash the command type so a given type
+	// always maps to the same canned response
+	hash := 0
+	for _, char := range commandType {
+		hash = hash*31 + int(char)
+	}
+	if hash < 0 {
+		hash = -hash
+	}
+
+	return responses[hash%len(responses)]
+}
+
+func (m *LLMManager) GetStatus() map[string]interface{} {
+	status := map[string]interface{}{
+		"fallback_mode": m.config.General.FallbackMode,
+		"debug":         m.config.General.Debug,
+		"personality":   m.config.General.Personality,
+	}
+
+	// Check API status
+	if m.apiClient != nil && m.config.API.Enabled {
+		status["api_enabled"] = true
+		status["api_provider"] = m.config.API.Provider
+		status["api_model"] = m.config.API.Model
+		status["api_available"] = m.apiClient.IsAvailable()
+	} else {
+		status["api_enabled"] = false
+		status["api_available"] = false
+	}
+
+	// Check local status
+	if m.ollamaClient != nil && m.config.Local.Enabled {
+		status["local_enabled"] = true
+		status["local_provider"] = m.config.Local.Provider
+		status["local_model"] = m.config.Local.Model
+		status["local_available"] = m.ollamaClient.IsAvailable()
+	} else {
+		status["local_enabled"] = false
+		status["local_available"] = false
+	}
+
+	return status
+}
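
Taken together, the intended flow is config -> manager -> prompt -> generate. A sketch of a hypothetical caller (the repo's real entry point is not part of this commit):

    package main

    import (
        "context"
        "fmt"
        "log"

        "parrot/internal/config"
        "parrot/internal/llm"
        "parrot/internal/prompts"
    )

    func main() {
        cfg, err := config.LoadConfig()
        if err != nil {
            log.Fatal(err)
        }

        manager := llm.NewLLMManager(cfg)

        // Build a prompt for a failed git command; the manager walks
        // API -> local -> hardcoded fallback and reports which one answered.
        prompt := prompts.BuildPrompt("git", "git push origin main", "1")
        response, backend := manager.Generate(context.Background(), prompt, "git")

        fmt.Printf("[%s] %s\n", backend, response)
    }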
internal/llm/ollama.go (added)
@@ -0,0 +1,109 @@
+package llm
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"time"
+)
+
+type OllamaClient struct {
+	BaseURL string
+	Model   string
+	client  *http.Client
+}
+
+type GenerateRequest struct {
+	Model  string `json:"model"`
+	Prompt string `json:"prompt"`
+	Stream bool   `json:"stream"`
+}
+
+type GenerateResponse struct {
+	Response string `json:"response"`
+	Done     bool   `json:"done"`
+}
+
+func NewOllamaClient(baseURL, model string) *OllamaClient {
+	if baseURL == "" {
+		baseURL = "http://localhost:11434"
+	}
+	if model == "" {
+		model = "phi3.5:3.8b"
+	}
+
+	return &OllamaClient{
+		BaseURL: baseURL,
+		Model:   model,
+		client: &http.Client{
+			Timeout: 30 * time.Second,
+		},
+	}
+}
+
+func (c *OllamaClient) Generate(ctx context.Context, prompt string) (string, error) {
+	u, err := url.JoinPath(c.BaseURL, "/api/generate")
+	if err != nil {
+		return "", fmt.Errorf("invalid base URL: %w", err)
+	}
+
+	req := GenerateRequest{
+		Model:  c.Model,
+		Prompt: prompt,
+		Stream: false,
+	}
+
+	reqBody, err := json.Marshal(req)
+	if err != nil {
+		return "", fmt.Errorf("failed to marshal request: %w", err)
+	}
+
+	httpReq, err := http.NewRequestWithContext(ctx, "POST", u, bytes.NewReader(reqBody))
+	if err != nil {
+		return "", fmt.Errorf("failed to create request: %w", err)
+	}
+	httpReq.Header.Set("Content-Type", "application/json")
+
+	resp, err := c.client.Do(httpReq)
+	if err != nil {
+		return "", fmt.Errorf("failed to send request: %w", err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return "", fmt.Errorf("ollama API returned status: %d", resp.StatusCode)
+	}
+
+	var genResp GenerateResponse
+	if err := json.NewDecoder(resp.Body).Decode(&genResp); err != nil {
+		return "", fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	return genResp.Response, nil
+}
+
+func (c *OllamaClient) IsAvailable() bool {
+	u, err := url.JoinPath(c.BaseURL, "/api/version")
+	if err != nil {
+		return false
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	req, err := http.NewRequestWithContext(ctx, "GET", u, nil)
+	if err != nil {
+		return false
+	}
+
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return false
+	}
+	defer resp.Body.Close()
+
+	return resp.StatusCode == http.StatusOK
+}
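
Used standalone, the client is an availability probe against /api/version followed by a blocking, non-streaming generate. A sketch assuming a local Ollama daemon with the default model pulled:

    package main

    import (
        "context"
        "fmt"

        "parrot/internal/llm"
    )

    func main() {
        // Empty arguments fall back to http://localhost:11434 and phi3.5:3.8b
        client := llm.NewOllamaClient("", "")

        if !client.IsAvailable() {
            fmt.Println("ollama unreachable; parrot would use canned responses")
            return
        }

        reply, err := client.Generate(context.Background(), "Say hello in five words.")
        if err != nil {
            fmt.Println("generate failed:", err)
            return
        }
        fmt.Println(reply)
    }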
internal/prompts/templates.go (added)
@@ -0,0 +1,97 @@
+package prompts
+
+import (
+	"strings"
+)
+
+type PromptTemplate struct {
+	CommandType string
+	Template    string
+}
+
+var Templates = map[string]string{
+	"git": `You are a sarcastic, witty terminal parrot that mocks failed git commands.
+Command that failed: {{.Command}}
+Exit code: {{.ExitCode}}
+
+Generate a brutal but clever one-liner insult about this git failure. Be creative, sarcastic, and reference git concepts. Keep it under 100 characters.
+Examples of good responses:
+- "Another git genius who forgot to pull first. Classic."
+- "Git good? More like git wrecked!"
+- "Your commits are as broken as your workflow."
+
+Response:`,
+
+	"nodejs": `You are a sarcastic, witty terminal parrot that mocks failed Node.js/npm commands.
+Command that failed: {{.Command}}
+Exit code: {{.ExitCode}}
+
+Generate a brutal but clever one-liner insult about this Node.js/npm failure. Be creative, sarcastic, and reference npm/node concepts. Keep it under 100 characters.
+Examples of good responses:
+- "NPM install failed? Shocking! Nobody saw that coming."
+- "Node modules: where dependencies go to die."
+- "Your package.json is crying. Fix it."
+
+Response:`,
+
+	"docker": `You are a sarcastic, witty terminal parrot that mocks failed Docker commands.
+Command that failed: {{.Command}}
+Exit code: {{.ExitCode}}
+
+Generate a brutal but clever one-liner insult about this Docker failure. Be creative, sarcastic, and reference Docker/container concepts. Keep it under 100 characters.
+Examples of good responses:
+- "Docker container more like docker DISASTER!"
+- "Even containers can't contain your incompetence."
+- "Your Dockerfile needs therapy."
+
+Response:`,
+
+	"http": `You are a sarcastic, witty terminal parrot that mocks failed HTTP requests (curl, wget, etc).
+Command that failed: {{.Command}}
+Exit code: {{.ExitCode}}
+
+Generate a brutal but clever one-liner insult about this HTTP failure. Be creative, sarcastic, and reference networking/HTTP concepts. Keep it under 100 characters.
+Examples of good responses:
+- "404: Competence not found."
+- "Even the internet doesn't want to talk to you."
+- "Connection refused? So is your logic."
+
+Response:`,
+
+	"generic": `You are a sarcastic, witty terminal parrot that mocks failed commands.
+Command that failed: {{.Command}}
+Exit code: {{.ExitCode}}
+
+Generate a brutal but clever one-liner insult about this command failure. Be creative and sarcastic. Keep it under 100 characters.
+Examples of good responses:
+- "Wow, you managed to break something simple. Impressive!"
+- "Maybe try reading the manual... oh wait, who am I kidding?"
+- "Error code says it all: user error!"
+
+Response:`,
+}
+
+type PromptData struct {
+	Command  string
+	ExitCode string
+}
+
+// BuildPrompt renders the template for commandType, substituting the failed
+// command and its exit code.
+func BuildPrompt(commandType, command, exitCode string) string {
+	template, exists := Templates[commandType]
+	if !exists {
+		template = Templates["generic"]
+	}
+
+	// Simple template replacement (plain string substitution, not text/template)
+	prompt := strings.ReplaceAll(template, "{{.Command}}", command)
+	prompt = strings.ReplaceAll(prompt, "{{.ExitCode}}", exitCode)
+
+	return prompt
+}
+
+// GetPromptForCommand returns the raw template for commandType, falling back
+// to the generic template.
+func GetPromptForCommand(commandType string) string {
+	if template, exists := Templates[commandType]; exists {
+		return template
+	}
+	return Templates["generic"]
+}
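
Since BuildPrompt is plain string substitution, rendering is easy to verify; a quick sketch with illustrative command and exit-code values:

    package main

    import (
        "fmt"

        "parrot/internal/prompts"
    )

    func main() {
        // {{.Command}} and {{.ExitCode}} become literal values in the output
        fmt.Println(prompts.BuildPrompt("git", "git push origin main", "1"))

        // Unknown command types fall back to the "generic" template
        fmt.Println(prompts.BuildPrompt("kubectl", "kubectl apply -f app.yaml", "1"))
    }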