tenseleyflow/parrot / f52cbf3

Fix double output and backend fallback issues

- Prevent duplicate messages on shell reload
- Fix localhost IPv6 resolution issues
- Fix HTTP timeout handling
- Fix error response body reading

Co-authored-by: espadonne <espadonne@outlook.com>
Co-authored-by: mfwolffe <wolffemf@dukes.jmu.edu>
Authored by Claude <noreply@anthropic.com>
SHA: f52cbf34f17df4063916ba4355a01b4b5f147304
Parents: 4a1b647
Tree: 2ade231

6 changed files

Status  File                        +   -
M       config/parrot.toml.example  2   1
M       internal/llm/api.go         2   1
M       internal/llm/manager.go     8   3
M       internal/llm/ollama.go      6   3
M       parrot-hook.sh              31  17
M       test-config.toml            1   1
config/parrot.toml.example (modified)
@@ -39,7 +39,8 @@ timeout = 30
 
 [local]
 # Ollama server endpoint
-base_url = "http://localhost:11434"
+# Note: Use 127.0.0.1 instead of localhost to avoid IPv6 resolution issues
+base_url = "http://127.0.0.1:11434"
 
 # Model to use (will be pulled automatically if not present)
 # llama3.2:3b - Fast loading, good quality for CLI responses
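
Why the switch matters: on many systems "localhost" resolves to the IPv6 loopback ::1 ahead of 127.0.0.1, so if the Ollama server listens only on IPv4, the first connection attempt can fail or stall. A minimal Go sketch (illustrative only, not part of this commit) for checking the resolution order on a given machine:

package main

import (
	"fmt"
	"net"
)

func main() {
	// "localhost" commonly maps to both ::1 and 127.0.0.1; the order the
	// resolver returns them in determines which address is tried first.
	addrs, err := net.LookupHost("localhost")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("localhost resolves to:", addrs) // e.g. [::1 127.0.0.1]
}

Hard-coding 127.0.0.1 sidesteps the lookup entirely, which is why both config files in this commit make the same substitution.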
internal/llm/api.go (modified)
@@ -50,7 +50,8 @@ func NewAPIClient(endpoint, apiKey, model string, timeout int) *APIClient {
 		APIKey:   apiKey,
 		Model:    model,
 		client: &http.Client{
-			Timeout: time.Duration(timeout) * time.Second,
+			// Use a generous timeout; actual timeout is controlled by context
+			Timeout: 60 * time.Second,
 		},
 	}
 }
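
The idea behind the fixed 60-second value: http.Client.Timeout now acts only as a generous upper bound, while the effective per-request deadline is carried by the context (wired up in manager.go below); whichever limit expires first aborts the request. A minimal sketch of that interaction, with an illustrative URL:

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Generous client-level ceiling, as in the commit.
	client := &http.Client{Timeout: 60 * time.Second}

	// The effective deadline comes from the request context; here the
	// 5-second context expires long before the 60-second client timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodGet,
		"http://127.0.0.1:11434/api/tags", nil)
	if err != nil {
		fmt.Println(err)
		return
	}

	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("request failed:", err) // deadline exceeded if the server stalls past 5s
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}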
internal/llm/manager.go (modified)
@@ -73,8 +73,13 @@ func (m *LLMManager) Generate(ctx context.Context, prompt string, commandType st
 		if m.config.General.Debug {
 			fmt.Printf("🔍 Trying API backend...\n")
 		}
-		
+
+		// Create timeout context for API calls
+		timeoutDuration := time.Duration(m.config.API.Timeout) * time.Second
+		apiCtx, cancel := context.WithTimeout(ctx, timeoutDuration)
+		defer cancel()
+
+		response, err := m.apiClient.Generate(apiCtx, prompt)
 		if err == nil && response != "" {
 			response = m.cleanResponse(response)
 			if m.config.General.Debug {
@@ -82,7 +87,7 @@ func (m *LLMManager) Generate(ctx context.Context, prompt string, commandType st
 			}
 			return response, BackendAPI
 		}
-		
+
 		if m.config.General.Debug {
 			fmt.Printf("❌ API backend failed: %v\n", err)
 		}
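
A side note on the defer cancel() above: deferred inside Generate, the cancel runs when the function returns, which is fine here because the response is fully consumed before returning. If the same pattern ever moved into a loop, cancelling explicitly per iteration would avoid holding timers until the enclosing function exits. A standalone sketch of that variant (the generate stub is hypothetical):

package main

import (
	"context"
	"fmt"
	"time"
)

// generate stands in for a backend call that honors context cancellation.
func generate(ctx context.Context, prompt string) (string, error) {
	select {
	case <-time.After(100 * time.Millisecond):
		return "ok: " + prompt, nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	for _, p := range []string{"a", "b", "c"} {
		// Cancel explicitly each iteration instead of deferring, so the
		// timeout's resources are released as soon as the call finishes.
		callCtx, cancel := context.WithTimeout(context.Background(), time.Second)
		resp, err := generate(callCtx, p)
		cancel()
		fmt.Println(resp, err)
	}
}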
internal/llm/ollama.go (modified)
@@ -39,7 +39,7 @@ func NewOllamaClient(baseURL, model string) *OllamaClient {
 		BaseURL: baseURL,
 		Model:   model,
 		client: &http.Client{
-			Timeout: 10 * time.Second, // More reasonable timeout for CLI
+			Timeout: 60 * time.Second, // Maximum timeout; actual timeout controlled by context
 		},
 	}
 }
@@ -74,8 +74,11 @@ func (c *OllamaClient) Generate(ctx context.Context, prompt string) (string, err
 	defer resp.Body.Close()
 
 	if resp.StatusCode != http.StatusOK {
-		body, _ := json.Marshal(resp.Body)
-		return "", fmt.Errorf("ollama API returned status %d: %s", resp.StatusCode, string(body))
+		// Read the actual error response body
+		bodyBytes := make([]byte, 512) // Read first 512 bytes for error message
+		n, _ := resp.Body.Read(bodyBytes)
+		bodyStr := string(bodyBytes[:n])
+		return "", fmt.Errorf("ollama API returned status %d: %s", resp.StatusCode, bodyStr)
 	}
 
 	var genResp GenerateResponse
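
The bug being fixed: json.Marshal(resp.Body) serialized the reader value itself, not the body's contents, so error messages were useless. The replacement reads raw bytes, though a single Read call may legally return fewer than 512 bytes even when more are available; an arguably more robust variant (a sketch, not what the commit ships) uses io.ReadAll with io.LimitReader:

package main

import (
	"fmt"
	"io"
	"net/http"
)

// readErrorBody reads at most 512 bytes of a response body for error
// reporting. io.ReadAll keeps reading until EOF or the cap, so a short
// first Read cannot truncate the message.
func readErrorBody(resp *http.Response) string {
	bodyBytes, err := io.ReadAll(io.LimitReader(resp.Body, 512))
	if err != nil {
		return fmt.Sprintf("(failed to read body: %v)", err)
	}
	return string(bodyBytes)
}

func main() {
	// Illustrative call against a local Ollama endpoint.
	resp, err := http.Get("http://127.0.0.1:11434/api/generate")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		fmt.Printf("status %d: %s\n", resp.StatusCode, readErrorBody(resp))
	}
}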
parrot-hook.sh (modified)
@@ -50,22 +50,36 @@ parrot_precmd() {
     fi
 }
 
-# Setup based on shell type
-if [ -n "$BASH_VERSION" ]; then
-    # Bash setup
-    PROMPT_COMMAND="parrot_prompt_command${PROMPT_COMMAND:+;$PROMPT_COMMAND}"
-    echo "🦜 Parrot is now watching your bash commands..."
-elif [ -n "$ZSH_VERSION" ]; then
-    # Zsh setup
-    autoload -Uz add-zsh-hook
-    add-zsh-hook preexec parrot_preexec
-    add-zsh-hook precmd parrot_precmd
-    echo "🦜 Parrot is now watching your zsh commands..."
-else
-    echo "⚠️  Parrot: Unsupported shell. Only bash and zsh are supported."
-fi
+# Setup based on shell type (only show messages if not already initialized)
+if [ -z "$PARROT_INITIALIZED" ]; then
+    if [ -n "$BASH_VERSION" ]; then
+        # Bash setup
+        PROMPT_COMMAND="parrot_prompt_command${PROMPT_COMMAND:+;$PROMPT_COMMAND}"
+        echo "🦜 Parrot is now watching your bash commands..."
+    elif [ -n "$ZSH_VERSION" ]; then
+        # Zsh setup
+        autoload -Uz add-zsh-hook
+        add-zsh-hook preexec parrot_preexec
+        add-zsh-hook precmd parrot_precmd
+        echo "🦜 Parrot is now watching your zsh commands..."
+    else
+        echo "⚠️  Parrot: Unsupported shell. Only bash and zsh are supported."
+    fi
+
+    # Show performance tip
+    if [ "${PARROT_ASYNC:-}" != "true" ]; then
+        echo "💡 Tip: Set PARROT_ASYNC=true to prevent terminal hangs on slow networks"
+    fi
 
-# Show performance tip
-if [ "${PARROT_ASYNC:-}" != "true" ]; then
-    echo "💡 Tip: Set PARROT_ASYNC=true to prevent terminal hangs on slow networks"
+    # Mark as initialized for this shell session
+    export PARROT_INITIALIZED=1
+else
+    # Silent re-initialization (e.g., when sourcing .zshrc again)
+    if [ -n "$BASH_VERSION" ]; then
+        PROMPT_COMMAND="parrot_prompt_command${PROMPT_COMMAND:+;$PROMPT_COMMAND}"
+    elif [ -n "$ZSH_VERSION" ]; then
+        autoload -Uz add-zsh-hook
+        add-zsh-hook preexec parrot_preexec
+        add-zsh-hook precmd parrot_precmd
+    fi
 fi
test-config.toml (modified)
@@ -9,7 +9,7 @@
 [local]
   enabled = true
   provider = "ollama"
-  endpoint = "http://localhost:11434"
+  endpoint = "http://127.0.0.1:11434"
  model = "phi3.5:3.8b"
   timeout = 30
 