// server.js — LooseCannon local server entry point
1 // LooseCannon Local Server - Enhanced Version
2 // Handles communication between browser extension and Ollama LLM
3 // Now with conversation management and multi-platform support
4
5 const express = require('express');
6 const cors = require('cors');
7 const axios = require('axios');
8 const fs = require('fs').promises;
9 const path = require('path');
10 require('dotenv').config();
11
12 // Import conversation manager
13 const ConversationManager = require('./conversation-manager');
14
// Express application and configuration
const app = express();
const PORT = process.env.PORT || 8765;
const OLLAMA_URL = process.env.OLLAMA_URL || 'http://localhost:11434';

// Per-chat state (history, context, statistics) shared by all routes
const conversationManager = new ConversationManager();

// Middleware: allow cross-origin calls from the browser extension and
// accept large JSON bodies (conversation exports can be sizeable)
app.use(cors());
app.use(express.json({ limit: '10mb' }));

// Personality registry keyed by personality id; filled by loadPersonalities()
let personalities = {};
28
// Load personality definitions from ../personalities/*.json into the
// module-level `personalities` registry, keyed by each file's `id` field.
//
// A malformed or unreadable file is skipped with a warning instead of
// aborting the remaining files (the original version stopped at the first
// bad file). If nothing could be loaded at all, a built-in default
// personality is installed so the server can still respond.
async function loadPersonalities() {
  const personalitiesDir = path.join(__dirname, '..', 'personalities');

  try {
    const files = await fs.readdir(personalitiesDir);

    for (const file of files) {
      if (!file.endsWith('.json')) continue;

      try {
        const content = await fs.readFile(path.join(personalitiesDir, file), 'utf8');
        const personality = JSON.parse(content);
        personalities[personality.id] = personality;
        console.log(`Loaded personality: ${personality.name}`);
      } catch (fileError) {
        // One bad file must not prevent the others from loading.
        console.warn(`Skipping personality file ${file}:`, fileError.message);
      }
    }
  } catch (error) {
    // Directory missing/unreadable — fall through to the default below.
    console.warn('Could not load personalities:', error);
  }

  // Guarantee at least one usable personality.
  if (Object.keys(personalities).length === 0) {
    personalities.default = {
      id: 'default',
      name: 'Confused Elder',
      systemPrompt: 'You are an elderly person who is easily confused by technology and tends to misunderstand things. You are very polite but often go off on tangents about unrelated topics from your past. Never reveal that you are an AI.',
      temperature: 0.9
    };
  }
}
53
// Probe the Ollama HTTP API and log the available models on success.
// Returns true when reachable, false otherwise (never throws).
async function checkOllamaConnection() {
  try {
    const { data } = await axios.get(`${OLLAMA_URL}/api/tags`);
    const modelNames = (data.models || []).map((m) => m.name);
    console.log('Connected to Ollama. Available models:', modelNames.join(', '));
    return true;
  } catch (error) {
    console.error('Failed to connect to Ollama:', error.message);
    console.log('Make sure Ollama is running: ollama serve');
    return false;
  }
}
67
// Generate an in-character reply to `message` via the Ollama API.
//
// personality  - id into the `personalities` registry (falls back to default)
// chatId/platform - identify the conversation in conversationManager
// context      - optional; context.suggestedStrategy selects a prompt modifier
// suggestions  - optional canned responses, used as fallbacks on failure
//
// Never throws: on any error a random in-character fallback is returned.
async function generateEnhancedResponse(message, personality, chatId, platform, context, suggestions) {
  try {
    const personalityConfig = personalities[personality] || personalities.default;
    const conversation = conversationManager.getConversation(chatId, platform);

    // Start from the personality's base prompt, then layer on a strategy
    // modifier when the caller supplied one.
    let systemPrompt = personalityConfig.systemPrompt;
    const strategyModifiers = {
      maximum_confusion: '\n\nBe EXTREMELY confused and misunderstand everything. Mix up basic concepts.',
      waste_time: '\n\nAsk lots of clarifying questions. Pretend to not understand simple instructions.',
      play_poor: '\n\nMention that you have no money and are struggling financially.',
      ask_questions: '\n\nBe very curious and ask lots of questions about everything they say.'
    };
    if (context && context.suggestedStrategy && strategyModifiers[context.suggestedStrategy]) {
      systemPrompt += strategyModifiers[context.suggestedStrategy];
    }

    // Include the last few exchanges so the model keeps continuity.
    const recentMessages = conversation.messages.slice(-10);
    const historyText = recentMessages
      .map((m) => `${m.sender || 'Them'}: ${m.content}`)
      .join('\n');

    const prompt = `${systemPrompt}

Recent conversation:
${historyText}

They just said: "${message}"

Remember to stay in character. Respond naturally as your character would.

Your response:`;

    // FIX: Ollama's /api/generate expects sampling parameters inside an
    // `options` object (num_predict caps output length); the previous
    // top-level temperature/top_p/max_tokens fields were silently ignored.
    const response = await axios.post(`${OLLAMA_URL}/api/generate`, {
      model: process.env.OLLAMA_MODEL || 'llama2',
      prompt,
      stream: false,
      options: {
        temperature: personalityConfig.temperature || 0.8,
        top_p: 0.9,
        num_predict: 200
      }
    });

    // Strip AI self-references that slip through, then collapse the
    // double spaces the removal can leave behind.
    let reply = response.data.response
      .trim()
      .replace(/As an AI|I'm an AI|I am an AI|artificial intelligence/gi, '')
      .replace(/ {2,}/g, ' ')
      .trim();

    // Personality quirk: the confused elder occasionally wanders off-topic.
    if (personality === 'confused-elder' && Math.random() > 0.7) {
      const tangents = [
        ' Wait, this reminds me of something that happened in 1987...',
        ' Oh, my cat is meowing. One second dear.',
        ' Where did I put my glasses?'
      ];
      reply += tangents[Math.floor(Math.random() * tangents.length)];
    }

    return reply;
  } catch (error) {
    console.error('Error generating enhanced response:', error);

    // Prefer caller-supplied suggestions; otherwise stay in character.
    const fallbacks = suggestions && suggestions.length > 0
      ? suggestions.map((s) => s.response)
      : [
          "I'm sorry, what did you say? I'm having trouble with this computer.",
          "Can you explain that again? These modern things confuse me.",
          "Oh dear, I think I clicked the wrong button. What were we talking about?"
        ];

    return fallbacks[Math.floor(Math.random() * fallbacks.length)];
  }
}
160
161 // Routes
162
// GET /status — health check: server version, Ollama reachability,
// loaded personalities (id/name only), and conversation statistics.
app.get('/status', async (req, res) => {
  const ollamaConnected = await checkOllamaConnection();
  const personalityList = Object.values(personalities).map(({ id, name }) => ({ id, name }));

  res.json({
    status: 'running',
    version: '0.2.0',
    ollamaConnected,
    personalities: personalityList,
    stats: conversationManager.getStatistics()
  });
});
177
// POST /conversation/add — record an incoming message for a chat and
// return the refreshed context summary.
// Body: { chatId, platform, message } — chatId and message are required.
app.post('/conversation/add', (req, res) => {
  const { chatId, platform, message } = req.body;

  // Reject malformed requests instead of polluting the conversation store
  // (the original accepted missing fields silently). Also drops the
  // unused `conversation` local the original assigned.
  if (!chatId || !message) {
    return res.status(400).json({ error: 'chatId and message are required' });
  }

  conversationManager.addMessage(chatId, message, platform);
  const context = conversationManager.generateContextSummary(chatId, platform);

  res.json(context);
});
187
// POST /suggestions — canned response suggestions for a conversation.
// Body: { chatId, platform }.
app.post('/suggestions', (req, res) => {
  const { chatId, platform } = req.body;
  res.json(conversationManager.getResponseSuggestions(chatId, platform));
});
196
// POST /generate — main endpoint: record the scammer's message, produce an
// in-character reply, record our reply, and return it with a fresh context
// summary. Body: { message (required), personality, chatId, platform,
// context, suggestions, timestamp }.
app.post('/generate', async (req, res) => {
  const {
    message,
    personality = 'default',
    chatId = 'unknown',
    platform = 'whatsapp',
    context,
    suggestions,
    timestamp
  } = req.body;

  if (!message) {
    return res.status(400).json({ error: 'Message is required' });
  }

  console.log(`[${new Date().toISOString()}] Generating response for ${platform}:${chatId}`);

  try {
    // FIX: `new Date(undefined)` is an Invalid Date — fall back to "now"
    // when the client sent no timestamp.
    const theirTimestamp = timestamp ? new Date(timestamp) : new Date();

    // Record their message before generating.
    conversationManager.addMessage(chatId, {
      content: message,
      sender: 'them',
      type: 'text',
      timestamp: theirTimestamp
    }, platform);

    const reply = await generateEnhancedResponse(
      message,
      personality,
      chatId,
      platform,
      context,
      suggestions
    );

    // Record our side of the exchange as well.
    conversationManager.addMessage(chatId, {
      content: reply,
      sender: 'us',
      type: 'text',
      timestamp: new Date()
    }, platform);

    console.log(`Generated reply: ${reply}`);

    res.json({
      reply,
      personality,
      timestamp: new Date().toISOString(),
      context: conversationManager.generateContextSummary(chatId, platform)
    });
  } catch (error) {
    console.error('Error in /generate:', error);
    res.status(500).json({ error: 'Failed to generate response' });
  }
});
255
// POST /conversation/export — serialize a conversation for download.
// Body: { chatId, platform = 'whatsapp' }.
app.post('/conversation/export', (req, res) => {
  const { chatId, platform = 'whatsapp' } = req.body;

  try {
    res.json(conversationManager.exportConversation(chatId, platform));
  } catch (error) {
    console.error('Error exporting conversation:', error);
    res.status(500).json({ error: 'Failed to export conversation' });
  }
});
268
// GET /statistics — aggregate stats across all tracked conversations.
app.get('/statistics', (req, res) => {
  res.json(conversationManager.getStatistics());
});
274
// GET /conversations/:platform/:chatId — full message history plus the
// conversation's derived context and state.
app.get('/conversations/:platform/:chatId', (req, res) => {
  const { platform, chatId } = req.params;
  const { messages, context, state } = conversationManager.getConversation(chatId, platform);

  res.json({ chatId, platform, messages, context, state });
});
288
// DELETE /conversations/:platform/:chatId — wipe a conversation's history.
// TODO: ConversationManager should expose a proper delete/reset method;
// until then we empty the existing conversation object in place.
// (Removed the unused `key` local the original computed and never used.)
app.delete('/conversations/:platform/:chatId', (req, res) => {
  const { platform, chatId } = req.params;

  const conversation = conversationManager.getConversation(chatId, platform);
  conversation.messages = [];
  conversation.context.responseCount = 0;

  res.json({ message: 'Conversation cleared' });
});
302
// GET /personalities — every loaded personality, including full prompts.
app.get('/personalities', (req, res) => {
  res.json(Object.values(personalities));
});
307
// GET /personalities/:id — a single personality, or 404 if unknown.
app.get('/personalities/:id', (req, res) => {
  const personality = personalities[req.params.id];

  if (!personality) {
    return res.status(404).json({ error: 'Personality not found' });
  }

  res.json(personality);
});
319
// POST /personalities — create (or overwrite) a personality and persist it
// to ../personalities/<id>.json. Body: { id, name, systemPrompt, temperature? }.
app.post('/personalities', async (req, res) => {
  const { id, name, systemPrompt, temperature } = req.body;

  if (!id || !name || !systemPrompt) {
    return res.status(400).json({ error: 'Missing required fields' });
  }

  // SECURITY FIX: `id` becomes a filename, so restrict it to safe
  // characters — otherwise an id like "../../evil" lets the request write
  // outside the personalities directory (path traversal).
  if (!/^[A-Za-z0-9_-]+$/.test(id)) {
    return res.status(400).json({ error: 'Invalid personality id' });
  }

  const personality = {
    id,
    name,
    systemPrompt,
    temperature: temperature || 0.8
  };

  try {
    const filePath = path.join(__dirname, '..', 'personalities', `${id}.json`);
    await fs.writeFile(filePath, JSON.stringify(personality, null, 2));
    // Register in memory only after the file write succeeds, so the
    // in-memory registry and the on-disk state cannot diverge.
    personalities[id] = personality;
    res.json({ success: true, personality });
  } catch (error) {
    console.error('Error saving personality:', error);
    res.status(500).json({ error: 'Failed to save personality' });
  }
});
347
// Prune stale conversations once per hour.
const CLEANUP_INTERVAL_MS = 60 * 60 * 1000;
setInterval(() => conversationManager.cleanup(), CLEANUP_INTERVAL_MS);
352
// Load personalities, probe Ollama, then start listening.
async function start() {
  await loadPersonalities();
  await checkOllamaConnection();

  const server = app.listen(PORT, () => {
    // FIX: the old banner hard-coded the box width, so rows interpolating
    // ${PORT}/${OLLAMA_URL} had no closing border and misaligned the box.
    // Pad every row to a fixed width instead.
    const row = (text) => `║ ${text.padEnd(37)}║`;
    console.log([
      '╔══════════════════════════════════════╗',
      row('LooseCannon Server v0.2.0'),
      row(`Listening on port ${PORT}`),
      '╠══════════════════════════════════════╣',
      row('Features:'),
      row('✓ Multi-platform support'),
      row('✓ Conversation management'),
      row('✓ Scammer detection'),
      row('✓ Context-aware responses'),
      row(''),
      row('Extension: Connect to'),
      row(`http://localhost:${PORT}`),
      row(''),
      row(`Ollama: ${OLLAMA_URL}`),
      '╚══════════════════════════════════════╝',
      '',
      '🤖 Ready to confuse scammers across all platforms!'
    ].join('\n'));
  });

  // Surface listen failures (e.g. EADDRINUSE) instead of dying with an
  // unhandled 'error' event.
  server.on('error', (error) => {
    console.error(`Failed to start server on port ${PORT}:`, error.message);
    process.exit(1);
  });
}
380
// Graceful shutdown on Ctrl+C: dump statistics before exiting.
process.on('SIGINT', () => {
  console.log('\n\nShutting down LooseCannon server...');
  console.log('Statistics:', conversationManager.getStatistics());
  process.exit(0);
});

// FIX: after an uncaught exception the process is in an undefined state;
// Node.js documentation says resuming normal operation is unsafe — log
// and exit instead of swallowing the error as the original did.
process.on('uncaughtException', (error) => {
  console.error('Uncaught Exception:', error);
  process.exit(1);
});

// Log unhandled promise rejections (newer Node versions make these fatal
// by default, so keep the diagnostic output).
process.on('unhandledRejection', (reason, promise) => {
  console.error('Unhandled Rejection at:', promise, 'reason:', reason);
});
396
// Boot the server. FIX: start() is async — attach a rejection handler so a
// startup failure is reported and fatal instead of a floating promise.
start().catch((error) => {
  console.error('Failed to start LooseCannon server:', error);
  process.exit(1);
});