fixes to local backend/models

- SHA: 4a1b647914ad5e10e0bf8c6a91cf8352eb5f8fe1
- Parents: a4adc5b
- Tree: 8ea7b4d
| Status | File | + | - |
|---|---|---|---|
| A | .gitignore | 2 | 0 |
| M | cmd/mock.go | 2 | 2 |
| M | internal/config/config.go | 3 | 3 |
| M | internal/llm/manager.go | 5 | 5 |
| M | internal/llm/ollama.go | 5 | 4 |
| M | parrot.spec | 14 | 7 |
.gitignore (added)

@@ -0,0 +1,2 @@
+dist/
+rpmbuild/
cmd/mock.go (modified)

@@ -108,8 +108,8 @@ func generateSmartResponse(cmdType, command, exitCode string) (string, *config.C
     // Build context-aware prompt with personality
     prompt := prompts.BuildPrompt(cmdType, command, exitCode, cfg.General.Personality)
 
-    // Use a shorter overall timeout for shell responsiveness (max 2 seconds)
-    maxTimeout := 2 * time.Second
+    // Use a reasonable timeout for LLM responses (10 seconds max)
+    maxTimeout := 10 * time.Second
     ctx, cancel := context.WithTimeout(context.Background(), maxTimeout)
     defer cancel()
 
internal/config/config.go (modified)

@@ -58,9 +58,9 @@ func DefaultConfig() *Config {
         Local: LocalConfig{
             Enabled:  true,
             Provider: "ollama",
-            Endpoint: "http://localhost:11434",
-            Model:    "phi3.5:3.8b",
-            Timeout:  5, // Reduced from 30 to 5 seconds for responsiveness
+            Endpoint: "http://127.0.0.1:11434",
+            Model:    "llama3.2:3b",
+            Timeout:  8, // Adequate time for local LLM processing
         },
         General: GeneralConfig{
             Personality: "savage",
internal/llm/manager.go (modified)

@@ -94,19 +94,19 @@ func (m *LLMManager) Generate(ctx context.Context, prompt string, commandType st
         fmt.Printf("🔍 Trying local backend...\n")
     }
 
-    // Create timeout context for local calls with reasonable timeout
+    // Create timeout context for local calls
     timeoutDuration := time.Duration(m.config.Local.Timeout) * time.Second
-    if timeoutDuration < 30*time.Second {
-        timeoutDuration = 30 * time.Second // Minimum 30s for graceful degradation
-    }
     localCtx, cancel := context.WithTimeout(ctx, timeoutDuration)
     defer cancel()
 
     response, err := m.ollamaClient.Generate(localCtx, prompt)
+    if m.config.General.Debug {
+        fmt.Printf("🐛 Raw Ollama response: '%s', error: %v\n", response, err)
+    }
     if err == nil && response != "" {
         response = m.cleanResponse(response)
         if m.config.General.Debug {
-            fmt.Printf("✅ Local backend succeeded\n")
+            fmt.Printf("✅ Local backend succeeded with: '%s'\n", response)
         }
         return response, BackendLocal
     }
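With the 30-second floor removed, the configured `Local.Timeout` now bounds the local call directly. A minimal standalone sketch (values taken from the diffs in this commit; variable names are hypothetical) of the effective per-request budget under the new defaults:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Defaults introduced by this commit (see config.go and ollama.go diffs).
	configTimeout := 8 * time.Second // internal/config: Local.Timeout default
	httpTimeout := 10 * time.Second  // internal/llm/ollama.go: http.Client Timeout

	// manager.go applies the configured value via context.WithTimeout; the HTTP
	// client's own timeout only shortens the budget if it would expire first.
	effective := configTimeout
	if httpTimeout < effective {
		effective = httpTimeout
	}
	fmt.Println("effective local LLM budget:", effective) // 8s with these defaults
}
```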
internal/llm/ollama.go (modified)

@@ -29,7 +29,7 @@ type GenerateResponse struct {
 
 func NewOllamaClient(baseURL, model string) *OllamaClient {
     if baseURL == "" {
-        baseURL = "http://localhost:11434"
+        baseURL = "http://127.0.0.1:11434" // Use IPv4 explicitly to avoid IPv6 issues
     }
     if model == "" {
         model = "llama3.2:3b"
@@ -39,7 +39,7 @@ func NewOllamaClient(baseURL, model string) *OllamaClient {
         BaseURL: baseURL,
         Model:   model,
         client: &http.Client{
-            Timeout: 60 * time.Second, // Reasonable timeout with OLLAMA_KEEP_ALIVE
+            Timeout: 10 * time.Second, // More reasonable timeout for CLI
         },
     }
 }
@@ -69,12 +69,13 @@ func (c *OllamaClient) Generate(ctx context.Context, prompt string) (string, err
 
     resp, err := c.client.Do(httpReq)
     if err != nil {
-        return "", fmt.Errorf("failed to send request: %w", err)
+        return "", fmt.Errorf("failed to send request to %s: %w", c.BaseURL, err)
     }
     defer resp.Body.Close()
 
     if resp.StatusCode != http.StatusOK {
-        return "", fmt.Errorf("ollama API returned status: %d", resp.StatusCode)
+        body, _ := json.Marshal(resp.Body)
+        return "", fmt.Errorf("ollama API returned status %d: %s", resp.StatusCode, string(body))
     }
 
     var genResp GenerateResponse
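One caveat in the new error path above: `json.Marshal(resp.Body)` encodes the `io.ReadCloser` value itself (producing `{}`), not the response payload, so Ollama's error text never reaches the message. A minimal sketch of the more conventional approach using `io.ReadAll` (assumes an added `io` import; the helper name is hypothetical, other identifiers match the diff):

```go
package llm

import (
	"fmt"
	"io"
	"net/http"
)

// checkOllamaStatus illustrates surfacing the API's error body on non-200 responses.
func checkOllamaStatus(resp *http.Response) error {
	if resp.StatusCode != http.StatusOK {
		// Read the raw error payload so it appears in the returned error;
		// json.Marshal on resp.Body would only encode the reader, not its contents.
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("ollama API returned status %d: %s", resp.StatusCode, string(body))
	}
	return nil
}
```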
parrot.spec (modified)

@@ -2,7 +2,7 @@
 
 Name: parrot
 Version: 1.3.0
-Release: 1%{?dist}
+Release: 2%{?dist}
 Summary: Intelligent CLI command failure assistant with AI-powered responses
 
 License: MIT
@@ -74,19 +74,22 @@ if command -v ollama >/dev/null 2>&1; then
     else
         echo "✅ AI model already available"
     fi
-
-    echo "🔧 To enable shell integration, run: parrot install"
-    echo "💡 This adds smart command failure detection to your shell"
+    echo ""
 else
     echo "🔄 Using built-in responses (no setup required)"
     echo ""
     echo "For AI-powered responses, install Ollama:"
     echo "  https://ollama.com/download"
-    echo "Then run: parrot setup"
+    echo ""
 fi
 
+echo "🚀 NEXT STEP: Run this command to enable shell integration:"
+echo "   parrot install"
 echo ""
-echo "Run 'parrot --help' to get started!"
+echo "💡 This adds smart command failure detection to your shell"
+echo "   After running it, failed commands will trigger helpful responses!"
+echo ""
+echo "📖 For more options, run: parrot --help"
 
 %preun
 # Clean up shell integrations on uninstall
@@ -104,7 +107,11 @@ fi
 %{_docdir}/%{name}/
 
 %changelog
-* Wed Sep 03 2025 mfw <espadonne@outlook.com> - 1.3.0-1
+* Fri Sep 13 2024 mfw <espadonne@outlook.com> - 1.3.0-2
+- Enhanced post-install messaging to clearly guide users to run 'parrot install'
+- Improved shell integration setup instructions and user experience
+
+* Wed Sep 03 2024 mfw <espadonne@outlook.com> - 1.3.0-1
 - Implemented transparent AI model management for seamless user experience
 - Switched default model to llama3.2:3b (25% faster loading than phi3.5:3.8b)
 - Added automatic OLLAMA_KEEP_ALIVE=1h configuration via parrot install