tenseleyflow/parrot / 4a1b647


fixes to local backend/models

Authored by espadonne
SHA      4a1b647914ad5e10e0bf8c6a91cf8352eb5f8fe1
Parents  a4adc5b
Tree     8ea7b4d

6 changed files

Status  File                         +    -
A       .gitignore                   2    0
M       cmd/mock.go                  2    2
M       internal/config/config.go    3    3
M       internal/llm/manager.go      5    5
M       internal/llm/ollama.go       5    4
M       parrot.spec                 14    7
.gitignore (added)
@@ -0,0 +1,2 @@
+dist/
+rpmbuild/
cmd/mock.go (modified)
@@ -108,8 +108,8 @@ func generateSmartResponse(cmdType, command, exitCode string) (string, *config.C
 	// Build context-aware prompt with personality
 	prompt := prompts.BuildPrompt(cmdType, command, exitCode, cfg.General.Personality)
 
-	// Use a shorter overall timeout for shell responsiveness (max 2 seconds)
-	maxTimeout := 2 * time.Second
+	// Use a reasonable timeout for LLM responses (10 seconds max)
+	maxTimeout := 10 * time.Second
 	ctx, cancel := context.WithTimeout(context.Background(), maxTimeout)
 	defer cancel()
 
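For context, this is how a `context.WithTimeout` deadline of this kind bounds a blocking call in Go. A minimal, self-contained sketch; `slowGenerate` is a hypothetical stand-in for the real LLM call, not parrot's API:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// slowGenerate is a hypothetical stand-in for an LLM call that may hang.
func slowGenerate(ctx context.Context) (string, error) {
	select {
	case <-time.After(3 * time.Second): // simulate model latency
		return "squawk! check your flags", nil
	case <-ctx.Done():
		return "", ctx.Err() // context.DeadlineExceeded once maxTimeout fires
	}
}

func main() {
	maxTimeout := 10 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), maxTimeout)
	defer cancel()

	resp, err := slowGenerate(ctx)
	fmt.Println(resp, err)
}
```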
internal/config/config.go (modified)
@@ -58,9 +58,9 @@ func DefaultConfig() *Config {
 		Local: LocalConfig{
 			Enabled:  true,
 			Provider: "ollama",
-			Endpoint: "http://localhost:11434",
-			Model:    "phi3.5:3.8b",
-			Timeout:  5,  // Reduced from 30 to 5 seconds for responsiveness
+			Endpoint: "http://127.0.0.1:11434",
+			Model:    "llama3.2:3b",
+			Timeout:  8,  // Adequate time for local LLM processing
 		},
 		General: GeneralConfig{
 			Personality:  "savage",
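Read together with the manager change below (which multiplies `Local.Timeout` by `time.Second`), these defaults imply a local-backend config shaped roughly like this. Field names and the whole-seconds meaning of `Timeout` come from the diff; the types and TOML tags are assumptions:

```go
// Sketch of the local backend settings used in DefaultConfig above.
// Field names follow the diff; types and toml tags are assumptions.
type LocalConfig struct {
	Enabled  bool   `toml:"enabled"`
	Provider string `toml:"provider"` // "ollama"
	Endpoint string `toml:"endpoint"` // "http://127.0.0.1:11434"
	Model    string `toml:"model"`    // "llama3.2:3b"
	Timeout  int    `toml:"timeout"`  // seconds; 8 by default
}
```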
internal/llm/manager.go (modified)
@@ -94,19 +94,19 @@ func (m *LLMManager) Generate(ctx context.Context, prompt string, commandType st
 			fmt.Printf("🔍 Trying local backend...\n")
 		}
 
-		// Create timeout context for local calls with reasonable timeout
+		// Create timeout context for local calls
 		timeoutDuration := time.Duration(m.config.Local.Timeout) * time.Second
-		if timeoutDuration < 30*time.Second {
-			timeoutDuration = 30 * time.Second // Minimum 30s for graceful degradation
-		}
 		localCtx, cancel := context.WithTimeout(ctx, timeoutDuration)
 		defer cancel()
 
 		response, err := m.ollamaClient.Generate(localCtx, prompt)
+		if m.config.General.Debug {
+			fmt.Printf("🐛 Raw Ollama response: '%s', error: %v\n", response, err)
+		}
 		if err == nil && response != "" {
 			response = m.cleanResponse(response)
 			if m.config.General.Debug {
-				fmt.Printf("✅ Local backend succeeded\n")
+				fmt.Printf("✅ Local backend succeeded with: '%s'\n", response)
 			}
 			return response, BackendLocal
 		}
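Taken as a whole, the hunk keeps the try-local-then-fall-back shape of `Generate`: if the local call errors or returns an empty string, execution falls through to whatever backend comes next. A self-contained condensation of that control flow; `BackendLocal` appears in the diff, while the `Backend` type, `BackendBuiltin`, `localGenerate`, and the canned string are illustrative assumptions:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

type Backend string

const (
	BackendLocal   Backend = "local"   // name taken from the diff
	BackendBuiltin Backend = "builtin" // assumed fallback backend
)

// localGenerate stands in for m.ollamaClient.Generate.
func localGenerate(ctx context.Context, prompt string) (string, error) {
	return "", errors.New("ollama unreachable") // force the fallback path for the demo
}

func generate(ctx context.Context, prompt string, timeout time.Duration) (string, Backend) {
	localCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	if resp, err := localGenerate(localCtx, prompt); err == nil && resp != "" {
		return resp, BackendLocal
	}
	// Local backend failed, timed out, or returned nothing: use canned output.
	return "squawk! that command needs different flags", BackendBuiltin
}

func main() {
	resp, backend := generate(context.Background(), "ls: command failed", 8*time.Second)
	fmt.Println(backend, resp)
}
```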
internal/llm/ollama.go (modified)
@@ -29,7 +29,7 @@ type GenerateResponse struct {
 
 func NewOllamaClient(baseURL, model string) *OllamaClient {
 	if baseURL == "" {
-		baseURL = "http://localhost:11434"
+		baseURL = "http://127.0.0.1:11434" // Use IPv4 explicitly to avoid IPv6 issues
 	}
 	if model == "" {
 		model = "llama3.2:3b"
@@ -39,7 +39,7 @@ func NewOllamaClient(baseURL, model string) *OllamaClient {
 		BaseURL: baseURL,
 		Model:   model,
 		client: &http.Client{
-			Timeout: 60 * time.Second, // Reasonable timeout with OLLAMA_KEEP_ALIVE
+			Timeout: 10 * time.Second, // More reasonable timeout for CLI
 		},
 	}
 }
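Worth noting about this change: the client-level `http.Client.Timeout` and any per-request context deadline both apply, and whichever expires first cancels the request. A minimal sketch of that interplay; the GET against the Ollama base URL is just a convenient request target for illustration:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Both limits apply to a request; whichever expires first cancels it.
	client := &http.Client{Timeout: 10 * time.Second}

	ctx, cancel := context.WithTimeout(context.Background(), 8*time.Second)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://127.0.0.1:11434/", nil)
	if err != nil {
		panic(err)
	}
	resp, err := client.Do(req) // the 8s context deadline wins over the 10s client timeout
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```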
@@ -69,12 +69,13 @@ func (c *OllamaClient) Generate(ctx context.Context, prompt string) (string, err
 
 	resp, err := c.client.Do(httpReq)
 	if err != nil {
-		return "", fmt.Errorf("failed to send request: %w", err)
+		return "", fmt.Errorf("failed to send request to %s: %w", c.BaseURL, err)
 	}
 	defer resp.Body.Close()
 
 	if resp.StatusCode != http.StatusOK {
-		return "", fmt.Errorf("ollama API returned status: %d", resp.StatusCode)
+		body, _ := io.ReadAll(resp.Body) // read the error body; requires the "io" import (json.Marshal would not read it)
+		return "", fmt.Errorf("ollama API returned status %d: %s", resp.StatusCode, string(body))
 	}
 
 	var genResp GenerateResponse
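For anyone exercising this error path by hand, a minimal non-streaming call against Ollama's /api/generate endpoint looks roughly like this. A standalone sketch, not parrot's client; the request and response field names follow Ollama's public API:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	payload, _ := json.Marshal(map[string]any{
		"model":  "llama3.2:3b",
		"prompt": "Say hello in five words.",
		"stream": false, // single JSON object instead of a stream
	})

	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Post("http://127.0.0.1:11434/api/generate", "application/json", bytes.NewReader(payload))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body) // same pattern as the error path above
		fmt.Printf("ollama API returned status %d: %s\n", resp.StatusCode, string(body))
		return
	}

	var out struct {
		Response string `json:"response"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(out.Response)
}
```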
parrot.spec (modified)
@@ -2,7 +2,7 @@
 
 Name:           parrot
 Version:        1.3.0
-Release:        1%{?dist}
+Release:        2%{?dist}
 Summary:        Intelligent CLI command failure assistant with AI-powered responses
 
 License:        MIT
@@ -74,19 +74,22 @@ if command -v ollama >/dev/null 2>&1; then
     else
         echo "✅ AI model already available"
     fi
-
-    echo "🔧 To enable shell integration, run: parrot install"
-    echo "💡 This adds smart command failure detection to your shell"
+    echo ""
 else
     echo "🔄 Using built-in responses (no setup required)"
     echo ""
     echo "For AI-powered responses, install Ollama:"
     echo "  https://ollama.com/download"
-    echo "Then run: parrot setup"
+    echo ""
 fi
 
+echo "🚀 NEXT STEP: Run this command to enable shell integration:"
+echo "    parrot install"
 echo ""
-echo "Run 'parrot --help' to get started!"
+echo "💡 This adds smart command failure detection to your shell"
+echo "   After running it, failed commands will trigger helpful responses!"
+echo ""
+echo "📖 For more options, run: parrot --help"
 
 %preun
 # Clean up shell integrations on uninstall
@@ -104,7 +107,11 @@ fi
 %{_docdir}/%{name}/
 
 %changelog
-* Wed Sep 03 2025 mfw <espadonne@outlook.com> - 1.3.0-1
+* Fri Sep 13 2024 mfw <espadonne@outlook.com> - 1.3.0-2
+- Enhanced post-install messaging to clearly guide users to run 'parrot install'
+- Improved shell integration setup instructions and user experience
+
+* Wed Sep 03 2024 mfw <espadonne@outlook.com> - 1.3.0-1
 - Implemented transparent AI model management for seamless user experience
 - Switched default model to llama3.2:3b (25% faster loading than phi3.5:3.8b)
 - Added automatic OLLAMA_KEEP_ALIVE=1h configuration via parrot install