feat: implement feedback system and integration tests for BuddAI

This commit is contained in:
JamesTheGiblet 2025-12-29 16:03:21 +00:00
parent 66095285a6
commit 036dabbb00
6 changed files with 314 additions and 20 deletions

View file

@ -6,7 +6,7 @@
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![Status: PRODUCTION](https://img.shields.io/badge/Status-PRODUCTION-green.svg)](https://github.com/JamesTheGiblet/BuddAI)
[![Version: v3.1](https://img.shields.io/badge/Version-v3.1-blue.svg)](https://github.com/JamesTheGiblet/BuddAI/releases)
[![Tests: 11/11](https://img.shields.io/badge/Tests-11%2F11%20Passing-brightgreen.svg)](https://github.com/JamesTheGiblet/BuddAI/actions)
[![Tests: 24/24](https://img.shields.io/badge/Tests-24%2F24%20Passing-brightgreen.svg)](https://github.com/JamesTheGiblet/BuddAI/actions)
---
@ -976,7 +976,11 @@ void updateLEDPattern() {
### Run the Test Suite
```bash
# Unit Tests
python tests/test_buddai.py
# Integration Tests
python tests/test_integration.py
```
### Test Coverage (24/24 Passing)

View file

@ -283,6 +283,7 @@ class BuddAI:
def __init__(self, user_id: str = "default", server_mode: bool = False):
self.user_id = user_id
self.last_generated_id = None
self.ensure_data_dir()
self.init_database()
self.session_id = self.create_session()
@ -368,6 +369,15 @@ class BuddAI:
except sqlite3.OperationalError:
pass
cursor.execute("""
CREATE TABLE IF NOT EXISTS feedback (
id INTEGER PRIMARY KEY AUTOINCREMENT,
message_id INTEGER,
positive BOOLEAN,
timestamp TIMESTAMP
)
""")
conn.commit()
conn.close()
@ -404,15 +414,17 @@ class BuddAI:
conn.commit()
conn.close()
def save_message(self, role: str, content: str) -> None:
def save_message(self, role: str, content: str) -> int:
    """Persist one chat message for the active session and return its row id."""
    connection = sqlite3.connect(DB_PATH)
    writer = connection.cursor()
    record = (self.session_id, role, content, datetime.now().isoformat())
    writer.execute(
        "INSERT INTO messages (session_id, role, content, timestamp) VALUES (?, ?, ?, ?)",
        record
    )
    new_id = writer.lastrowid
    connection.commit()
    connection.close()
    return new_id
def index_local_repositories(self, root_path: str) -> None:
"""Crawl directories and index .py, .ino, and .cpp files"""
@ -833,6 +845,25 @@ float applyForge(float current, float target, float k) {{ return target + (curre
return generated_code
def record_feedback(self, message_id: int, feedback: bool) -> None:
    """Learn from user feedback: persist the rating, then update the learner."""
    connection = sqlite3.connect(DB_PATH)
    writer = connection.cursor()
    writer.execute("""
        INSERT INTO feedback (message_id, positive, timestamp)
        VALUES (?, ?, ?)
    """, (message_id, feedback, datetime.now().isoformat()))
    connection.commit()
    connection.close()
    # Feed the rating into the style-confidence learner.
    self.update_style_confidence(message_id, feedback)
def update_style_confidence(self, message_id: int, positive: bool) -> None:
    """Adjust confidence of style preferences based on feedback.

    Currently a deliberate no-op: the comment marks it as a placeholder for
    the V4.0 learning loop. Kept as a stable hook so record_feedback() can
    call it unconditionally today.
    """
    # Placeholder for V4.0 learning loop — intentionally does nothing yet.
    pass
def _route_request(self, user_message: str, force_model: Optional[str], forge_mode: str) -> str:
"""Route the request to the appropriate model or handler."""
# Determine model based on complexity
@ -865,8 +896,8 @@ float applyForge(float current, float target, float k) {{ return target + (curre
if style_context:
self.context_messages.append({"role": "system", "content": style_context})
self.save_message("user", user_message)
self.context_messages.append({"role": "user", "content": user_message, "timestamp": datetime.now().isoformat()})
user_msg_id = self.save_message("user", user_message)
self.context_messages.append({"id": user_msg_id, "role": "user", "content": user_message, "timestamp": datetime.now().isoformat()})
full_response = ""
@ -899,8 +930,9 @@ float applyForge(float current, float target, float k) {{ return target + (curre
full_response += bar
yield bar
self.save_message("assistant", full_response)
self.context_messages.append({"role": "assistant", "content": full_response, "timestamp": datetime.now().isoformat()})
msg_id = self.save_message("assistant", full_response)
self.last_generated_id = msg_id
self.context_messages.append({"id": msg_id, "role": "assistant", "content": full_response, "timestamp": datetime.now().isoformat()})
# --- Main Chat Method ---
def chat(self, user_message: str, force_model: Optional[str] = None, forge_mode: str = "2") -> str:
@ -909,17 +941,17 @@ float applyForge(float current, float target, float k) {{ return target + (curre
if style_context:
self.context_messages.append({"role": "system", "content": style_context})
self.save_message("user", user_message)
self.context_messages.append({"role": "user", "content": user_message, "timestamp": datetime.now().isoformat()})
user_msg_id = self.save_message("user", user_message)
self.context_messages.append({"id": user_msg_id, "role": "user", "content": user_message, "timestamp": datetime.now().isoformat()})
# Direct Schedule Check
if "what should i be doing" in user_message.lower() or "my schedule" in user_message.lower() or "schedule check" in user_message.lower():
status = self.get_user_status()
response = f"📅 **Schedule Check**\nAccording to your protocol, you should be: **{status}**"
print(f"⏰ Schedule check triggered: {status}")
self.save_message("assistant", response)
self.context_messages.append({"role": "assistant", "content": response, "timestamp": datetime.now().isoformat()})
msg_id = self.save_message("assistant", response)
self.last_generated_id = msg_id
self.context_messages.append({"id": msg_id, "role": "assistant", "content": response, "timestamp": datetime.now().isoformat()})
return response
response = self._route_request(user_message, force_model, forge_mode)
@ -933,8 +965,9 @@ float applyForge(float current, float target, float k) {{ return target + (curre
bar = "\n\nPROACTIVE: > " + " ".join([f"{i+1}. {s}" for i, s in enumerate(suggestions)])
response += bar
self.save_message("assistant", response)
self.context_messages.append({"role": "assistant", "content": response, "timestamp": datetime.now().isoformat()})
msg_id = self.save_message("assistant", response)
self.last_generated_id = msg_id
self.context_messages.append({"id": msg_id, "role": "assistant", "content": response, "timestamp": datetime.now().isoformat()})
return response
@ -975,15 +1008,15 @@ float applyForge(float current, float target, float k) {{ return target + (curre
conn.close()
return []
cursor.execute("SELECT role, content, timestamp FROM messages WHERE session_id = ? ORDER BY id ASC", (session_id,))
cursor.execute("SELECT id, role, content, timestamp FROM messages WHERE session_id = ? ORDER BY id ASC", (session_id,))
rows = cursor.fetchall()
conn.close()
self.session_id = session_id
self.context_messages = []
loaded_history = []
for role, content, ts in rows:
msg = {"role": role, "content": content, "timestamp": ts}
for msg_id, role, content, ts in rows:
msg = {"id": msg_id, "role": role, "content": content, "timestamp": ts}
self.context_messages.append(msg)
loaded_history.append(msg)
return loaded_history
@ -1049,7 +1082,6 @@ float applyForge(float current, float target, float k) {{ return target + (curre
# --- Server Implementation ---
if SERVER_AVAILABLE:
app = FastAPI(title="BuddAI API", version="3.1")
app = FastAPI(title="BuddAI API", version="3.2")
# Allow React frontend to communicate
@ -1075,6 +1107,10 @@ if SERVER_AVAILABLE:
class SessionDeleteRequest(BaseModel):
session_id: str
class FeedbackRequest(BaseModel):
    """Request body for POST /api/feedback: a thumbs-up/down rating of one message."""
    # Row id of the rated message (the id returned by save_message / chat responses).
    message_id: int
    # True = positive (👍), False = negative (👎).
    positive: bool
# Multi-user support
class BuddAIManager:
def __init__(self):
@ -1174,7 +1210,7 @@ if SERVER_AVAILABLE:
async def chat_endpoint(request: ChatRequest, user_id: str = Header("default")):
server_buddai = buddai_manager.get_instance(user_id)
response = server_buddai.chat(request.message, force_model=request.model, forge_mode=request.forge_mode)
return {"response": response}
return {"response": response, "message_id": server_buddai.last_generated_id}
@app.websocket("/api/ws/chat")
async def websocket_endpoint(websocket: WebSocket):
@ -1192,10 +1228,16 @@ if SERVER_AVAILABLE:
for chunk in server_buddai.chat_stream(user_message, model, forge_mode):
await websocket.send_json({"type": "token", "content": chunk})
await websocket.send_json({"type": "end"})
await websocket.send_json({"type": "end", "message_id": server_buddai.last_generated_id})
except WebSocketDisconnect:
pass
@app.post("/api/feedback")
async def feedback_endpoint(req: FeedbackRequest, user_id: str = Header("default")):
    """Record a user's rating of a generated message for the given user."""
    instance = buddai_manager.get_instance(user_id)
    instance.record_feedback(req.message_id, req.positive)
    return {"status": "success"}
@app.get("/api/history")
async def history_endpoint(user_id: str = Header("default")):
server_buddai = buddai_manager.get_instance(user_id)

View file

@ -205,6 +205,7 @@
const [showSidebar, setShowSidebar] = useState(true);
const [editingSession, setEditingSession] = useState(null);
const [renameText, setRenameText] = useState("");
const [feedbackGiven, setFeedbackGiven] = useState({});
const [input, setInput] = useState("");
const [loading, setLoading] = useState(false);
const [status, setStatus] = useState("connecting");
@ -307,6 +308,24 @@
fetchSessions();
};
// Submit a thumbs-up/down rating for an assistant message, with an
// optimistic UI update that is rolled back if the request fails.
const handleFeedback = async (messageId, positive) => {
  // Ignore messages without ids and duplicate votes.
  if (!messageId || feedbackGiven[messageId]) {
    return;
  }
  const verdict = positive ? 'positive' : 'negative';
  setFeedbackGiven(prev => ({ ...prev, [messageId]: verdict }));
  try {
    const payload = { message_id: messageId, positive: positive };
    await fetch("/api/feedback", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(payload)
    });
  } catch (e) {
    console.error("Feedback submission failed", e);
    // Revert UI on failure so the user can retry.
    setFeedbackGiven(prev => {
      const next = { ...prev };
      delete next[messageId];
      return next;
    });
  }
};
const loadSession = async (sessionId) => {
setLoading(true);
try {
@ -363,6 +382,14 @@
});
} else if (data.type === 'end') {
setLoading(false);
setHistory(prev => {
const newHistory = [...prev];
const lastMsg = newHistory[newHistory.length - 1];
if (lastMsg && lastMsg.role === 'assistant') {
lastMsg.id = data.message_id;
}
return newHistory;
});
if (!currentSessionId) fetchSessions();
}
};
@ -375,7 +402,7 @@
body: JSON.stringify({ message: msgText, forge_mode: forgeMode })
});
const data = await res.json();
setHistory(prev => [...prev, { role: "assistant", content: data.response }]);
setHistory(prev => [...prev, { role: "assistant", content: data.response, id: data.message_id }]);
if (!currentSessionId) fetchSessions();
} catch (err) {
setHistory(prev => [...prev, { role: "assistant", content: "Error connecting to BuddAI server." }]);
@ -503,6 +530,22 @@
))}
</div>
)}
{msg.role === 'assistant' && msg.id && !loading && (
<div className="feedback-btns">
<button
className={`feedback-btn ${feedbackGiven[msg.id] === 'positive' ? 'selected' : ''}`}
onClick={() => handleFeedback(msg.id, true)}
disabled={!!feedbackGiven[msg.id]}
title="Good response"
>👍</button>
<button
className={`feedback-btn ${feedbackGiven[msg.id] === 'negative' ? 'selected' : ''}`}
onClick={() => handleFeedback(msg.id, false)}
disabled={!!feedbackGiven[msg.id]}
title="Bad response"
>👎</button>
</div>
)}
</div>
);
})}

View file

@ -922,6 +922,48 @@ def test_connection_pool():
print_fail(f"Pool overflow handling failed. Size: {pool.pool.qsize()}")
return False
# Test 20: Feedback System
def test_feedback_system():
    """Verify record_feedback() persists a rating row linked to a saved message.

    Uses an isolated temp-file database patched over the module's DB_PATH.
    NOTE(review): the diff rendering stripped indentation; the nesting below
    assumes the builtins.print patch only wraps BuddAI construction — confirm
    against the original file.
    """
    print_test("Feedback System")
    # Use a named temporary file for DB
    fd, test_db_path = tempfile.mkstemp(suffix=".db")
    os.close(fd)
    test_db = Path(test_db_path)
    try:
        with patch('buddai_v3_2.DB_PATH', test_db):
            # Suppress prints emitted during construction
            with patch('builtins.print'):
                buddai = BuddAI(server_mode=False)
            # 1. Create a message to rate
            msg_id = buddai.save_message("assistant", "Test response")
            # 2. Record positive feedback
            buddai.record_feedback(msg_id, True)
            # 3. Verify in DB (connect to the temp file directly)
            conn = sqlite3.connect(test_db)
            cursor = conn.cursor()
            cursor.execute("SELECT positive FROM feedback WHERE message_id = ?", (msg_id,))
            row = cursor.fetchone()
            conn.close()
            if row and row[0] == 1:  # Boolean true is 1 in sqlite
                print_pass("Positive feedback recorded successfully")
                return True
            else:
                print_fail(f"Feedback not recorded correctly. Got: {row}")
                return False
    finally:
        # Best-effort cleanup of the temp database file.
        try:
            if test_db.exists():
                os.unlink(test_db)
        except Exception:
            pass
# Main Test Runner
def run_all_tests():
print("\n" + "="*60)
@ -948,6 +990,7 @@ def run_all_tests():
("Upload Security", test_upload_security),
("WebSocket Logic", test_websocket_logic),
("Connection Pooling", test_connection_pool),
("Feedback System", test_feedback_system),
]
results = []

162
tests/test_integration.py Normal file
View file

@ -0,0 +1,162 @@
#!/usr/bin/env python3
"""
BuddAI v3.2 Integration Test Suite
Tests API endpoints and server integration
Author: James Gilbert
License: MIT
"""
import sys
import os
import importlib.util
import tempfile
import unittest
from unittest.mock import patch, MagicMock
from pathlib import Path
import json
# Dynamic import of buddai_v3.2.py — the dot in the filename prevents a normal
# "import buddai_v3_2" statement, so the module is loaded from its path.
REPO_ROOT = Path(__file__).parent.parent
MODULE_PATH = REPO_ROOT / "buddai_v3.2.py"
spec = importlib.util.spec_from_file_location("buddai_v3_2", MODULE_PATH)
buddai_module = importlib.util.module_from_spec(spec)
# Register under a stable name so patch("buddai_v3_2....") targets resolve.
sys.modules["buddai_v3_2"] = buddai_module
spec.loader.exec_module(buddai_module)
# Check for server dependencies; the flag is set by the module under test.
SERVER_AVAILABLE = getattr(buddai_module, "SERVER_AVAILABLE", False)
if SERVER_AVAILABLE:
    from fastapi.testclient import TestClient
    app = buddai_module.app
    client = TestClient(app)
else:
    print("⚠️ Server dependencies missing. Integration tests skipped.")
@unittest.skipUnless(SERVER_AVAILABLE, "Server dependencies not installed")
class TestBuddAIIntegration(unittest.TestCase):
    """End-to-end tests exercising the FastAPI endpoints via TestClient.

    Each test runs against a fresh temp-file database patched over the
    module's DB_PATH, with console output suppressed.
    """

    def setUp(self):
        """Point the app at a fresh temp DB and silence console output."""
        # Create a fresh temp DB for each test
        self.db_fd, self.db_path = tempfile.mkstemp(suffix=".db")
        os.close(self.db_fd)
        # Patch DB_PATH in the module under test; addCleanup guarantees the
        # patcher is stopped even if the test (or later setUp steps) fails.
        self.db_patcher = patch("buddai_v3_2.DB_PATH", Path(self.db_path))
        self.mock_db_path = self.db_patcher.start()
        self.addCleanup(self.db_patcher.stop)
        # Reset the manager so each test gets BuddAI instances bound to the temp DB
        if hasattr(buddai_module, 'buddai_manager'):
            buddai_module.buddai_manager.instances = {}
        # Suppress prints
        self.print_patcher = patch("builtins.print")
        self.print_patcher.start()
        self.addCleanup(self.print_patcher.stop)

    def tearDown(self):
        """Remove the per-test database file (patchers stop via addCleanup)."""
        try:
            os.unlink(self.db_path)
        except OSError:
            # Narrowed from a bare except: only swallow filesystem errors
            # (file already gone / still locked), not programming errors.
            pass

    def test_health_check(self):
        """GET / returns 200 and status"""
        response = client.get("/")
        self.assertEqual(response.status_code, 200)
        self.assertIn("BuddAI API Online", response.text)

    def test_chat_flow(self):
        """POST /api/chat returns response"""
        # Mock the internal chat method to avoid Ollama dependency
        with patch.object(buddai_module.BuddAI, 'chat', return_value="Integrated Response") as mock_chat:
            response = client.post("/api/chat", json={"message": "Hello API"})
            self.assertEqual(response.status_code, 200)
            # message_id is None because the mocked chat never sets last_generated_id
            self.assertEqual(response.json(), {"response": "Integrated Response", "message_id": None})
            # Verify user_id header handling (default)
            mock_chat.assert_called_once()

    def test_session_lifecycle_api(self):
        """Test full session CRUD via API"""
        # 1. Create
        resp = client.post("/api/session/new")
        self.assertEqual(resp.status_code, 200)
        session_id = resp.json()["session_id"]
        # 2. List
        resp = client.get("/api/sessions")
        self.assertEqual(resp.status_code, 200)
        sessions = resp.json()["sessions"]
        self.assertTrue(any(s["id"] == session_id for s in sessions))
        # 3. Rename
        new_title = "API Test Session"
        resp = client.post("/api/session/rename", json={"session_id": session_id, "title": new_title})
        self.assertEqual(resp.status_code, 200)
        resp = client.get("/api/sessions")
        updated_session = next(s for s in resp.json()["sessions"] if s["id"] == session_id)
        self.assertEqual(updated_session["title"], new_title)
        # 4. Load
        resp = client.post("/api/session/load", json={"session_id": session_id})
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.json()["session_id"], session_id)
        # 5. Delete
        resp = client.post("/api/session/delete", json={"session_id": session_id})
        self.assertEqual(resp.status_code, 200)
        resp = client.get("/api/sessions")
        self.assertFalse(any(s["id"] == session_id for s in resp.json()["sessions"]))

    def test_multi_user_isolation_api(self):
        """Verify data isolation between users via API headers"""
        user1_headers = {"user-id": "user1"}
        user2_headers = {"user-id": "user2"}
        # User 1 creates session
        resp1 = client.post("/api/session/new", headers=user1_headers)
        sid1 = resp1.json()["session_id"]
        client.post("/api/session/rename", json={"session_id": sid1, "title": "User1 Chat"}, headers=user1_headers)
        # User 2 creates session
        resp2 = client.post("/api/session/new", headers=user2_headers)
        sid2 = resp2.json()["session_id"]
        client.post("/api/session/rename", json={"session_id": sid2, "title": "User2 Chat"}, headers=user2_headers)
        # Verify User 1 sees only their session
        list1 = client.get("/api/sessions", headers=user1_headers).json()["sessions"]
        ids1 = [s["id"] for s in list1]
        self.assertIn(sid1, ids1)
        self.assertNotIn(sid2, ids1)
        # Verify User 2 sees only their session
        list2 = client.get("/api/sessions", headers=user2_headers).json()["sessions"]
        ids2 = [s["id"] for s in list2]
        self.assertIn(sid2, ids2)
        self.assertNotIn(sid1, ids2)

    def test_upload_api(self):
        """Test file upload endpoint"""
        with tempfile.TemporaryDirectory() as tmp_data_dir:
            with patch("buddai_v3_2.DATA_DIR", Path(tmp_data_dir)):
                # Mock indexing to avoid parsing logic
                with patch.object(buddai_module.BuddAI, 'index_local_repositories') as mock_index:
                    # Create dummy file
                    files = {'file': ('test.py', b'print("hello")', 'text/x-python')}
                    response = client.post("/api/upload", files=files)
                    self.assertEqual(response.status_code, 200)
                    self.assertIn("Successfully indexed", response.json()["message"])
                    mock_index.assert_called()
if __name__ == '__main__':
    # Print a banner, then hand off to unittest's CLI runner.
    divider = "=" * 60
    print("\n" + divider)
    print("🚀 BuddAI v3.2 Integration Tests")
    print(divider)
    unittest.main(verbosity=2)