Add comprehensive tests for error handling, input validation, search filtering, and UI components
Some checks failed
Build and Push Docker Image / build-and-push (push) Has been cancelled
- Implemented unit tests for the ErrorHandler class, covering error handling, frequency tracking, and performance warnings.
- Created integration tests for input validation, error handling, auto-save functionality, and the search/filter system.
- Developed unit tests for the DataFilter, QuickFilters, and SearchHistory classes to ensure the filtering logic works as expected.
- Added tests for the SearchFilterWidget UI component, verifying initialization, filter functionality, and responsiveness.
- Included edge-case tests for error handling without a UI manager and for handling of None values.
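For reviewers who want to exercise the new suites locally, a minimal sketch is shown below (it is not part of the commit). It assumes pytest is installed and the command is run from the repository root; the file paths match the diffs that follow, and the `-v` flag is just standard verbose output.

# Minimal sketch: run the test modules added/updated in this commit.
# Assumes pytest is installed and the repository root is the working directory.
import pytest

pytest.main([
    "tests/test_auto_save.py",
    "tests/test_error_handler.py",
    "tests/test_input_validator.py",
    "tests/test_search_filter.py",
    "tests/test_search_filter_ui.py",  # creates a Tk root window, so a display is required
    "-v",  # verbose output
])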
tests/test_auto_save.py (new file, 253 lines)
@@ -0,0 +1,253 @@
"""Tests for auto-save and backup system."""

import pytest
import tempfile
import os
import shutil
from unittest.mock import MagicMock, patch
from datetime import datetime
import pandas as pd

from src.auto_save import AutoSaveManager


class TestAutoSaveManager:
    """Test cases for AutoSaveManager class."""

    def setup_method(self):
        """Set up test fixtures."""
        # Create temporary directories for testing
        self.test_dir = tempfile.mkdtemp()
        self.backup_dir = os.path.join(self.test_dir, "backups")
        self.test_data_file = os.path.join(self.test_dir, "test_data.csv")

        # Create test data file
        test_data = pd.DataFrame({
            'Date': ['2024-01-01', '2024-01-02'],
            'Notes': ['Test note 1', 'Test note 2']
        })
        test_data.to_csv(self.test_data_file, index=False)

        # Mock callbacks
        self.mock_status_callback = MagicMock()
        self.mock_error_callback = MagicMock()

        # Create AutoSaveManager instance
        self.auto_save = AutoSaveManager(
            data_file_path=self.test_data_file,
            backup_dir=self.backup_dir,
            status_callback=self.mock_status_callback,
            error_callback=self.mock_error_callback,
            interval_minutes=0.1,  # Very short interval for testing
            max_backups=3
        )

    def teardown_method(self):
        """Clean up test fixtures."""
        if hasattr(self, 'auto_save'):
            self.auto_save.stop()
        if os.path.exists(self.test_dir):
            shutil.rmtree(self.test_dir)

    def test_initialization(self):
        """Test AutoSaveManager initialization."""
        assert self.auto_save.data_file_path == self.test_data_file
        assert self.auto_save.backup_dir == self.backup_dir
        assert self.auto_save.interval_minutes == 0.1
        assert self.auto_save.max_backups == 3
        assert not self.auto_save.is_running

    def test_backup_directory_creation(self):
        """Test that backup directory is created."""
        # Directory should be created during initialization
        assert os.path.exists(self.backup_dir)
        assert os.path.isdir(self.backup_dir)

    def test_create_backup(self):
        """Test backup creation."""
        backup_file = self.auto_save.create_backup("test_backup")

        # Verify backup file exists
        assert os.path.exists(backup_file)
        assert backup_file.startswith(self.backup_dir)
        assert "test_backup" in backup_file

        # Verify backup content matches original
        original_data = pd.read_csv(self.test_data_file)
        backup_data = pd.read_csv(backup_file)
        pd.testing.assert_frame_equal(original_data, backup_data)

    def test_create_backup_nonexistent_file(self):
        """Test backup creation when source file doesn't exist."""
        auto_save = AutoSaveManager(
            data_file_path="/nonexistent/file.csv",
            backup_dir=self.backup_dir,
            status_callback=self.mock_status_callback,
            error_callback=self.mock_error_callback
        )

        backup_file = auto_save.create_backup("test")
        assert backup_file is None

        # Error callback should have been called
        self.mock_error_callback.assert_called()

    def test_cleanup_old_backups(self):
        """Test cleanup of old backups."""
        # Create more backups than max_backups
        backup_files = []
        for i in range(5):
            backup_file = self.auto_save.create_backup(f"test_{i}")
            backup_files.append(backup_file)

        # Perform cleanup
        self.auto_save._cleanup_old_backups()

        # Should only have max_backups files remaining
        remaining_files = [f for f in backup_files if os.path.exists(f)]
        assert len(remaining_files) <= self.auto_save.max_backups

    def test_start_and_stop(self):
        """Test starting and stopping auto-save."""
        # Start auto-save
        self.auto_save.start()
        assert self.auto_save.is_running

        # Stop auto-save
        self.auto_save.stop()
        assert not self.auto_save.is_running

    def test_get_backup_files(self):
        """Test getting list of backup files."""
        # Create some backups
        self.auto_save.create_backup("backup1")
        self.auto_save.create_backup("backup2")

        backup_files = self.auto_save.get_backup_files()

        assert len(backup_files) >= 2
        assert all(os.path.exists(f) for f in backup_files)
        assert all(f.endswith('.csv') for f in backup_files)

    def test_restore_from_backup(self):
        """Test restoring from backup."""
        # Create a backup
        backup_file = self.auto_save.create_backup("test_restore")

        # Modify original file
        modified_data = pd.DataFrame({
            'Date': ['2024-01-03'],
            'Notes': ['Modified note']
        })
        modified_data.to_csv(self.test_data_file, index=False)

        # Restore from backup
        success = self.auto_save.restore_from_backup(backup_file)
        assert success

        # Verify restoration
        restored_data = pd.read_csv(self.test_data_file)
        assert len(restored_data) == 2  # Original had 2 rows
        assert 'Test note 1' in restored_data['Notes'].values

    def test_restore_from_nonexistent_backup(self):
        """Test restoring from nonexistent backup."""
        success = self.auto_save.restore_from_backup("/nonexistent/backup.csv")
        assert not success
        self.mock_error_callback.assert_called()

    def test_backup_filename_format(self):
        """Test backup filename format."""
        backup_file = self.auto_save.create_backup("test_format")

        filename = os.path.basename(backup_file)

        # Should contain source filename, suffix, and timestamp
        assert "test_data" in filename
        assert "test_format" in filename
        assert filename.endswith('.csv')

        # Should have timestamp in format
        assert len(filename.split('_')) >= 4  # name_suffix_date_time.csv

    def test_backup_with_special_characters(self):
        """Test backup creation with special characters in suffix."""
        backup_file = self.auto_save.create_backup("test with spaces & symbols!")

        assert os.path.exists(backup_file)
        # Special characters should be handled appropriately
        assert os.path.isfile(backup_file)

    def test_concurrent_backup_operations(self):
        """Test that concurrent backup operations don't interfere."""
        # This tests thread safety (basic test)
        backup1 = self.auto_save.create_backup("concurrent1")
        backup2 = self.auto_save.create_backup("concurrent2")

        assert backup1 != backup2
        assert os.path.exists(backup1)
        assert os.path.exists(backup2)

    def test_error_handling_during_backup(self):
        """Test error handling during backup operations."""
        # Test with permission error
        with patch('shutil.copy2', side_effect=PermissionError("Permission denied")):
            backup_file = self.auto_save.create_backup("permission_test")
            assert backup_file is None
            self.mock_error_callback.assert_called()

    def test_auto_save_integration(self):
        """Test integration of auto-save functionality."""
        # Start auto-save
        self.auto_save.start()

        # Wait a short time for at least one auto-save cycle
        import time
        time.sleep(0.2)  # Wait longer than interval

        # Should have created startup backup
        backup_files = self.auto_save.get_backup_files()
        assert len(backup_files) > 0

        # Stop auto-save
        self.auto_save.stop()

    def test_status_callback_integration(self):
        """Test status callback integration."""
        self.auto_save.create_backup("status_test")

        # Status callback should have been called
        self.mock_status_callback.assert_called()
        call_args = self.mock_status_callback.call_args[0]
        assert "backup" in call_args[0].lower()

    def test_backup_size_validation(self):
        """Test that backups have reasonable size."""
        backup_file = self.auto_save.create_backup("size_test")

        original_size = os.path.getsize(self.test_data_file)
        backup_size = os.path.getsize(backup_file)

        # Backup should be similar size to original (allowing for minor differences)
        assert abs(backup_size - original_size) < 100  # Within 100 bytes

    def test_backup_file_sorting(self):
        """Test that backup files are sorted by creation time."""
        # Create backups with small delays
        import time
        backup1 = self.auto_save.create_backup("first")
        time.sleep(0.01)
        backup2 = self.auto_save.create_backup("second")
        time.sleep(0.01)
        backup3 = self.auto_save.create_backup("third")

        backup_files = self.auto_save.get_backup_files()

        # Files should be sorted with newest first
        assert len(backup_files) >= 3

        # Check that the files are in the list (order might vary based on filesystem)
        backup_names = [os.path.basename(f) for f in backup_files]
        assert any("first" in name for name in backup_names)
        assert any("second" in name for name in backup_names)
        assert any("third" in name for name in backup_names)
tests/test_error_handler.py (new file, 169 lines)
@@ -0,0 +1,169 @@
"""Tests for error handling system."""

import pytest
from unittest.mock import MagicMock, patch
import time
import logging

from src.error_handler import ErrorHandler, OperationTimer


class TestErrorHandler:
    """Test cases for ErrorHandler class."""

    def setup_method(self):
        """Set up test fixtures before each test method."""
        self.mock_logger = MagicMock()
        self.mock_ui_manager = MagicMock()
        self.error_handler = ErrorHandler(self.mock_logger, self.mock_ui_manager)

    def test_error_handler_initialization(self):
        """Test ErrorHandler initializes correctly."""
        assert self.error_handler.logger == self.mock_logger
        assert self.error_handler.ui_manager == self.mock_ui_manager
        assert self.error_handler.error_counts == {}
        assert self.error_handler.last_error_time == {}

    def test_handle_error_basic(self):
        """Test basic error handling."""
        error = ValueError("Test error")
        self.error_handler.handle_error(error, "Test context")

        # Verify logging
        self.mock_logger.error.assert_called_once()

        # Verify UI feedback if show_dialog is True
        self.mock_ui_manager.show_error_dialog.assert_called_once()

    def test_handle_error_without_dialog(self):
        """Test error handling without showing dialog."""
        error = ValueError("Test error")
        self.error_handler.handle_error(error, "Test context", show_dialog=False)

        # Verify logging
        self.mock_logger.error.assert_called_once()

        # Verify no UI dialog
        self.mock_ui_manager.show_error_dialog.assert_not_called()

    def test_handle_error_with_custom_message(self):
        """Test error handling with custom user message."""
        error = ValueError("Test error")
        custom_message = "Custom error message"
        self.error_handler.handle_error(error, "Test context", user_message=custom_message)

        # Verify custom message is used
        self.mock_ui_manager.show_error_dialog.assert_called_once()
        args = self.mock_ui_manager.show_error_dialog.call_args[0]
        assert custom_message in args[0]

    def test_error_frequency_tracking(self):
        """Test that error frequency is tracked correctly."""
        error = ValueError("Test error")
        context = "Test context"

        # Handle same error multiple times
        self.error_handler.handle_error(error, context)
        self.error_handler.handle_error(error, context)
        self.error_handler.handle_error(error, context)

        # Check error counting
        error_key = f"{type(error).__name__}:{context}"
        assert self.error_handler.error_counts[error_key] == 3

    def test_log_performance_warning(self):
        """Test performance warning logging."""
        operation = "test_operation"
        duration = 5.0

        self.error_handler.log_performance_warning(operation, duration)

        # Verify warning is logged
        self.mock_logger.warning.assert_called_once()
        log_call = self.mock_logger.warning.call_args[0][0]
        assert "Performance warning" in log_call
        assert operation in log_call
        assert str(duration) in log_call

    def test_operation_timer_context_manager(self):
        """Test operation timer context manager."""
        timer = OperationTimer(self.error_handler, "test_operation")

        with timer:
            time.sleep(0.1)  # Short sleep to simulate work

        # With default threshold, this should not trigger a warning
        self.mock_logger.warning.assert_not_called()

    def test_operation_timer_with_warning(self):
        """Test operation timer triggers warning for slow operations."""
        # Use very low threshold to trigger warning
        timer = OperationTimer(self.error_handler, "test_operation", warning_threshold=0.01)

        with timer:
            time.sleep(0.1)  # Sleep longer than threshold

        # Should trigger performance warning
        self.mock_logger.warning.assert_called_once()

    def test_multiple_error_types(self):
        """Test handling different types of errors."""
        errors = [
            ValueError("Value error"),
            FileNotFoundError("File not found"),
            RuntimeError("Runtime error"),
        ]

        for error in errors:
            self.error_handler.handle_error(error, "Test context")

        # Verify all errors were logged
        assert self.mock_logger.error.call_count == len(errors)
        assert self.mock_ui_manager.show_error_dialog.call_count == len(errors)


class TestErrorHandlerEdgeCases:
    """Test edge cases and error conditions."""

    def setup_method(self):
        """Set up test fixtures."""
        self.mock_logger = MagicMock()
        self.error_handler = ErrorHandler(self.mock_logger)  # No UI manager

    def test_error_handler_without_ui_manager(self):
        """Test error handling when UI manager is not available."""
        error = ValueError("Test error")

        # Should not raise exception even without UI manager
        self.error_handler.handle_error(error, "Test context")

        # Should still log the error
        self.mock_logger.error.assert_called_once()

    def test_handle_none_error(self):
        """Test handling when error is None."""
        # Should handle gracefully
        self.error_handler.handle_error(None, "Test context")

        # Should still attempt to log
        self.mock_logger.error.assert_called_once()

    def test_operation_timer_without_error_handler(self):
        """Test operation timer with None error handler."""
        timer = OperationTimer(None, "test_operation")

        # Should not raise exception
        with timer:
            time.sleep(0.1)

    def test_empty_context(self):
        """Test error handling with empty context."""
        error = ValueError("Test error")
        self.error_handler.handle_error(error, "")

        # Should still work with empty context
        self.mock_logger.error.assert_called_once()


if __name__ == "__main__":
    pytest.main([__file__])
tests/test_input_validator.py (modified)
@@ -11,16 +11,21 @@ from pathlib import Path
from unittest.mock import Mock, patch, MagicMock
import pytest
import pandas as pd
import time

# Add src to path
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))

from data_manager import DataManager
from export_manager import ExportManager
from init import logger
from input_validator import InputValidator
from error_handler import ErrorHandler
from auto_save import AutoSaveManager
from search_filter import DataFilter, QuickFilters, SearchHistory
from medicine_manager import MedicineManager
from pathology_manager import PathologyManager
from theme_manager import ThemeManager
from init import logger


class TestIntegrationSuite:
@@ -339,3 +344,341 @@ class TestSystemHealthChecks:

        # These should not raise exceptions
        assert True, "Logging system working correctly"


class TestNewFeaturesIntegration:
    """Integration tests for new features added to TheChart."""

    @pytest.fixture(autouse=True)
    def setup_new_features_test(self):
        """Set up test environment for new features."""
        self.temp_dir = tempfile.mkdtemp()
        self.test_csv = os.path.join(self.temp_dir, "test_data.csv")
        self.backup_dir = os.path.join(self.temp_dir, "backups")

        # Create sample data
        sample_data = pd.DataFrame({
            'date': ['01/01/2024', '01/15/2024', '02/01/2024'],
            'note': ['First entry', 'Second entry', 'Third entry'],
            'medicine1': [1, 0, 1],  # 1 = taken, 0 = not taken
            'pathology1': [3, 7, 9]
        })
        sample_data.to_csv(self.test_csv, index=False)

        # Initialize managers
        self.medicine_manager = MedicineManager(logger=logger)
        self.pathology_manager = PathologyManager(logger=logger)

        yield

        # Cleanup
        import shutil
        if os.path.exists(self.temp_dir):
            shutil.rmtree(self.temp_dir)

    def test_input_validation_integration(self):
        """Test input validation system integration."""
        print("Testing input validation integration...")

        # Test comprehensive validation workflow
        test_cases = [
            # (field_type, value, expected_valid)
            ("date", "01/15/2024", True),
            ("date", "invalid-date", False),
            ("pathology_score", "5", True),
            ("pathology_score", "15", False),
            ("note", "Valid note", True),
            ("note", "A" * 1001, False),  # Too long
            ("filename", "data.csv", True),
            ("filename", "A" * 150, False),  # Too long filename
        ]

        for field_type, value, expected_valid in test_cases:
            if field_type == "date":
                is_valid, _, _ = InputValidator.validate_date(value)
            elif field_type == "pathology_score":
                is_valid, _, _ = InputValidator.validate_pathology_score(value)
            elif field_type == "note":
                is_valid, _, _ = InputValidator.validate_note(value)
            elif field_type == "filename":
                is_valid, _, _ = InputValidator.validate_filename(value)

            assert is_valid == expected_valid, \
                f"Validation failed for {field_type}='{value}': expected {expected_valid}, got {is_valid}"

    def test_error_handling_integration(self):
        """Test error handling system integration."""
        print("Testing error handling integration...")

        # Create a logger for testing
        import logging
        test_logger = logging.getLogger("test")
        mock_ui_manager = MagicMock()
        error_handler = ErrorHandler(logger=test_logger, ui_manager=mock_ui_manager)

        # Test different error types
        error_scenarios = [
            (ValueError("Invalid input"), "Input validation", "Validation failed"),
            (FileNotFoundError("File not found"), "File operation", "File operation failed"),
            (RuntimeError("Unknown error"), "Runtime operation", "Unexpected error")
        ]

        for error, context, user_message in error_scenarios:
            # Test basic error handling
            error_handler.handle_error(error, context, user_message, show_dialog=False)

            # Verify the UI manager was called to update status
            assert mock_ui_manager.update_status.called, f"Status update not called for {context}"

        # Test validation error handling
        error_handler.handle_validation_error("test_field", "Invalid value", "Use a valid value")
        assert mock_ui_manager.update_status.called, "Validation error handling failed"

        # Test file error handling
        error_handler.handle_file_error("read", "/test/file.csv", FileNotFoundError("File missing"))
        assert mock_ui_manager.update_status.called, "File error handling failed"

    def test_auto_save_integration(self):
        """Test auto-save system integration."""
        print("Testing auto-save integration...")

        mock_save_callback = MagicMock()

        auto_save = AutoSaveManager(
            save_callback=mock_save_callback,
            interval_minutes=0.01,  # Very short for testing
        )

        try:
            # Test enabling auto-save
            auto_save.enable_auto_save()
            assert auto_save._auto_save_enabled, "Auto-save should be enabled"

            # Test data modification tracking
            auto_save.mark_data_modified()
            assert auto_save._data_modified, "Data should be marked as modified"

            # Test force save
            auto_save.force_save()
            assert mock_save_callback.called, "Save callback should be called on force save"

            # Test save with modifications
            auto_save.mark_data_modified()
            auto_save.force_save()  # Call force_save again
            assert mock_save_callback.call_count >= 2, "Save should be called when data is modified"

            # Test disabling auto-save
            auto_save.disable_auto_save()
            assert not auto_save._auto_save_enabled, "Auto-save should be disabled"

        finally:
            auto_save.disable_auto_save()

        print("Auto-save integration test passed!")

    def test_search_filter_integration(self):
        """Test search and filter system integration."""
        print("Testing search and filter integration...")

        # Load test data
        test_data = pd.read_csv(self.test_csv)

        data_filter = DataFilter()

        # Test text search
        data_filter.set_search_term("Second")
        filtered_data = data_filter.apply_filters(test_data)
        assert len(filtered_data) == 1, "Text search failed"
        assert "Second entry" in filtered_data['note'].values

        # Test date range filter
        data_filter.clear_all_filters()
        data_filter.set_date_range_filter("01/01/2024", "01/31/2024")
        filtered_data = data_filter.apply_filters(test_data)
        assert len(filtered_data) == 2, "Date range filter failed"

        # Test medicine filter
        data_filter.clear_all_filters()
        data_filter.set_medicine_filter("medicine1", True)  # Taken
        filtered_data = data_filter.apply_filters(test_data)
        assert len(filtered_data) == 2, "Medicine filter (taken) failed"

        data_filter.set_medicine_filter("medicine1", False)  # Not taken
        filtered_data = data_filter.apply_filters(test_data)
        assert len(filtered_data) == 1, "Medicine filter (not taken) failed"

        # Test pathology range filter
        data_filter.clear_all_filters()
        data_filter.set_pathology_range_filter("pathology1", 5, 10)
        filtered_data = data_filter.apply_filters(test_data)
        assert len(filtered_data) == 2, "Pathology range filter failed"

        # Test combined filters
        data_filter.clear_all_filters()
        data_filter.set_search_term("entry")
        data_filter.set_pathology_range_filter("pathology1", 7, 10)
        filtered_data = data_filter.apply_filters(test_data)
        assert len(filtered_data) == 2, "Combined filters failed"

        # Test quick filters
        QuickFilters.last_week(data_filter)
        assert "date_range" in data_filter.active_filters, "Quick filter (last week) failed"

        QuickFilters.last_month(data_filter)
        assert "date_range" in data_filter.active_filters, "Quick filter (last month) failed"

        pathology_keys = self.pathology_manager.get_pathology_keys()
        if pathology_keys:
            QuickFilters.high_symptoms(data_filter, pathology_keys)
            assert "pathologies" in data_filter.active_filters, "Quick filter (high symptoms) failed"

    def test_search_history_integration(self):
        """Test search history functionality."""
        print("Testing search history integration...")

        search_history = SearchHistory()

        # Test adding searches
        test_searches = ["symptom search", "medication query", "date range"]
        for search in test_searches:
            search_history.add_search(search)

        history = search_history.get_history()
        assert len(history) >= len(test_searches), "Search history not recording properly"

        # Test search suggestions
        suggestions = search_history.get_suggestions("med")
        medication_suggestions = [s for s in suggestions if "med" in s.lower()]
        assert len(medication_suggestions) >= 0, "Search suggestions not working"

    def test_complete_workflow_integration(self):
        """Test complete workflow with all new features."""
        print("Testing complete workflow integration...")

        # Initialize all systems
        mock_save_callback = MagicMock()
        auto_save = AutoSaveManager(
            save_callback=mock_save_callback,
            interval_minutes=5
        )
        data_filter = DataFilter()

        try:
            # Step 1: Enable auto-save
            auto_save.enable_auto_save()

            # Step 2: Validate new data entry
            new_date = "01/15/2024"
            new_note = "Workflow test entry"

            date_valid, date_msg, _ = InputValidator.validate_date(new_date)
            note_valid, note_msg, _ = InputValidator.validate_note(new_note)

            assert date_valid, f"Date validation failed: {date_msg}"
            assert note_valid, f"Note validation failed: {note_msg}"

            score_valid, score_msg, _ = InputValidator.validate_pathology_score("6")
            assert score_valid, f"Score validation failed: {score_msg}"

            # Step 3: Add validated data to file
            original_data = pd.read_csv(self.test_csv)
            new_row = pd.DataFrame({
                'date': [new_date],
                'note': [new_note],
                'medicine1': [0],
                'pathology1': [6]
            })
            updated_data = pd.concat([original_data, new_row], ignore_index=True)
            updated_data.to_csv(self.test_csv, index=False)

            # Step 4: Mark data as modified for auto-save
            auto_save.mark_data_modified()
            auto_save.force_save()
            assert mock_save_callback.called, "Auto-save should trigger save callback"

            # Step 5: Test filtering on updated data
            data_filter.set_search_term("Workflow")
            filtered_data = data_filter.apply_filters(updated_data)
            assert len(filtered_data) == 1, "Search filter failed on updated data"
            assert any("Workflow" in note for note in filtered_data['note'].values)

            # Step 6: Test date range filter
            data_filter.clear_all_filters()
            data_filter.set_date_range_filter("01/14/2024", "01/16/2024")  # Include both entries on 01/15
            filtered_data = data_filter.apply_filters(updated_data)
            assert len(filtered_data) == 2, "Date filter failed on new entry"

            # Step 7: Test error handling with invalid operation
            try:
                # Simulate file operation error
                raise FileNotFoundError("Simulated file error")
            except FileNotFoundError as e:
                import logging
                test_logger = logging.getLogger("test")
                mock_ui_manager = MagicMock()
                error_handler = ErrorHandler(logger=test_logger, ui_manager=mock_ui_manager)
                error_handler.handle_error(e, "Test error handling", "Simulated error", show_dialog=False)

                # Verify error was handled
                assert mock_ui_manager.update_status.called, "Error handling should update status"

            # Step 8: Verify auto-save functionality
            assert auto_save._auto_save_enabled, "Auto-save should be enabled"
            auto_save.disable_auto_save()
            assert not auto_save._auto_save_enabled, "Auto-save should be disabled"

            print("Complete workflow integration test passed!")

        finally:
            auto_save.disable_auto_save()

    def test_performance_under_load(self):
        """Test system performance with larger datasets."""
        print("Testing performance under load...")

        # Create larger dataset
        large_data = []
        for i in range(100):
            large_data.append({
                'date': f"01/{(i % 28) + 1:02d}/2024",
                'note': f"Entry number {i}",
                'medicine1': 1 if i % 2 == 0 else 0,
                'pathology1': (i % 10) + 1
            })

        large_df = pd.DataFrame(large_data)
        large_csv = os.path.join(self.temp_dir, "large_data.csv")
        large_df.to_csv(large_csv, index=False)

        # Test filtering performance
        data_filter = DataFilter()

        start_time = time.time()
        data_filter.set_search_term("Entry")
        filtered_data = data_filter.apply_filters(large_df)
        search_time = time.time() - start_time

        assert len(filtered_data) == 100, "Search filter failed on large dataset"
        assert search_time < 1.0, f"Search took too long: {search_time:.2f}s"

        # Test auto-save performance
        mock_save_callback = MagicMock()
        auto_save = AutoSaveManager(
            save_callback=mock_save_callback,
            interval_minutes=5
        )

        try:
            start_time = time.time()
            auto_save.enable_auto_save()
            auto_save.mark_data_modified()
            auto_save.force_save()
            save_time = time.time() - start_time

            assert mock_save_callback.called, "Save callback should be called"
            assert save_time < 2.0, f"Save took too long: {save_time:.2f}s"

        finally:
            auto_save.disable_auto_save()

        print(f"Performance test completed: Search={search_time:.3f}s, Save={save_time:.3f}s")
tests/test_search_filter.py (new file, 353 lines)
@@ -0,0 +1,353 @@
"""Tests for search and filter system."""

import pytest
from datetime import datetime, timedelta
import pandas as pd
from unittest.mock import MagicMock

from src.search_filter import DataFilter, QuickFilters, SearchHistory


class TestDataFilter:
    """Test cases for DataFilter class."""

    def setup_method(self):
        """Set up test fixtures."""
        # Create sample data for testing
        self.sample_data = pd.DataFrame({
            'Date': ['2024-01-01', '2024-01-15', '2024-02-01', '2024-02-15'],
            'Notes': ['First entry', 'Second entry', 'Third entry', 'Fourth entry'],
            'medicine1': ['08:00:1', '', '12:00:2', '09:00:1|21:00:1'],
            'medicine2': ['', '10:00:1', '', '14:00:0.5'],
            'pathology1': [3, 7, 5, 9],
            'pathology2': [2, 8, 4, 6]
        })

        self.data_filter = DataFilter()

    def test_initialization(self):
        """Test DataFilter initialization."""
        assert len(self.data_filter.active_filters) == 0
        assert self.data_filter.search_term == ""

    def test_set_search_term(self):
        """Test setting search term."""
        self.data_filter.set_search_term("test search")
        assert self.data_filter.search_term == "test search"

        # Clear search term
        self.data_filter.set_search_term("")
        assert self.data_filter.search_term == ""

    def test_text_search_in_notes(self):
        """Test text search in notes field."""
        self.data_filter.set_search_term("Second")
        filtered_data = self.data_filter.apply_filters(self.sample_data)

        assert len(filtered_data) == 1
        assert "Second entry" in filtered_data['Notes'].values

    def test_text_search_in_dates(self):
        """Test text search in dates."""
        self.data_filter.set_search_term("2024-02")
        filtered_data = self.data_filter.apply_filters(self.sample_data)

        assert len(filtered_data) == 2
        assert all("2024-02" in date for date in filtered_data['Date'].values)

    def test_text_search_case_insensitive(self):
        """Test that text search is case insensitive."""
        self.data_filter.set_search_term("FIRST")
        filtered_data = self.data_filter.apply_filters(self.sample_data)

        assert len(filtered_data) == 1
        assert "First entry" in filtered_data['Notes'].values

    def test_date_range_filter(self):
        """Test date range filtering."""
        self.data_filter.set_date_range_filter("2024-01-10", "2024-02-10")
        filtered_data = self.data_filter.apply_filters(self.sample_data)

        assert len(filtered_data) == 2
        dates = pd.to_datetime(filtered_data['Date'])
        assert all(pd.to_datetime("2024-01-10") <= date <= pd.to_datetime("2024-02-10") for date in dates)

    def test_date_range_filter_start_only(self):
        """Test date range filter with only start date."""
        self.data_filter.set_date_range_filter("2024-02-01", None)
        filtered_data = self.data_filter.apply_filters(self.sample_data)

        assert len(filtered_data) == 2
        dates = pd.to_datetime(filtered_data['Date'])
        assert all(date >= pd.to_datetime("2024-02-01") for date in dates)

    def test_date_range_filter_end_only(self):
        """Test date range filter with only end date."""
        self.data_filter.set_date_range_filter(None, "2024-01-31")
        filtered_data = self.data_filter.apply_filters(self.sample_data)

        assert len(filtered_data) == 2
        dates = pd.to_datetime(filtered_data['Date'])
        assert all(date <= pd.to_datetime("2024-01-31") for date in dates)

    def test_medicine_filter_taken(self):
        """Test medicine filter for taken medicines."""
        self.data_filter.set_medicine_filter("medicine1", True)
        filtered_data = self.data_filter.apply_filters(self.sample_data)

        # Should return rows where medicine1 has a non-empty value
        assert len(filtered_data) == 3
        assert all(val != '' for val in filtered_data['medicine1'].values)

    def test_medicine_filter_not_taken(self):
        """Test medicine filter for not taken medicines."""
        self.data_filter.set_medicine_filter("medicine1", False)
        filtered_data = self.data_filter.apply_filters(self.sample_data)

        # Should return rows where medicine1 is empty
        assert len(filtered_data) == 1
        assert filtered_data['medicine1'].iloc[0] == ''

    def test_pathology_range_filter(self):
        """Test pathology score range filtering."""
        self.data_filter.set_pathology_range_filter("pathology1", 5, 8)
        filtered_data = self.data_filter.apply_filters(self.sample_data)

        assert len(filtered_data) == 2
        scores = filtered_data['pathology1'].values
        assert all(5 <= score <= 8 for score in scores)

    def test_pathology_range_filter_min_only(self):
        """Test pathology filter with only minimum value."""
        self.data_filter.set_pathology_range_filter("pathology1", 6, None)
        filtered_data = self.data_filter.apply_filters(self.sample_data)

        assert len(filtered_data) == 2
        scores = filtered_data['pathology1'].values
        assert all(score >= 6 for score in scores)

    def test_pathology_range_filter_max_only(self):
        """Test pathology filter with only maximum value."""
        self.data_filter.set_pathology_range_filter("pathology1", None, 5)
        filtered_data = self.data_filter.apply_filters(self.sample_data)

        assert len(filtered_data) == 2
        scores = filtered_data['pathology1'].values
        assert all(score <= 5 for score in scores)

    def test_combined_filters(self):
        """Test combining multiple filters."""
        self.data_filter.set_search_term("entry")
        self.data_filter.set_date_range_filter("2024-01-01", "2024-01-31")
        self.data_filter.set_medicine_filter("medicine1", True)

        filtered_data = self.data_filter.apply_filters(self.sample_data)

        # Should satisfy all conditions
        assert len(filtered_data) == 1
        assert "entry" in filtered_data['Notes'].iloc[0]
        assert filtered_data['Date'].iloc[0].startswith("2024-01")
        assert filtered_data['medicine1'].iloc[0] != ''

    def test_clear_filter(self):
        """Test clearing specific filter types."""
        # Set multiple filters
        self.data_filter.set_search_term("test")
        self.data_filter.set_date_range_filter("2024-01-01", "2024-12-31")
        self.data_filter.set_medicine_filter("medicine1", True)

        # Clear date range filter
        self.data_filter.clear_filter("date_range")

        assert "date_range" not in self.data_filter.active_filters
        assert self.data_filter.search_term == "test"  # Other filters remain

    def test_clear_all_filters(self):
        """Test clearing all filters."""
        # Set multiple filters
        self.data_filter.set_search_term("test")
        self.data_filter.set_date_range_filter("2024-01-01", "2024-12-31")
        self.data_filter.set_medicine_filter("medicine1", True)

        # Clear all filters
        self.data_filter.clear_all_filters()

        assert len(self.data_filter.active_filters) == 0
        assert self.data_filter.search_term == ""

    def test_get_filter_summary(self):
        """Test getting filter summary."""
        # No filters
        summary = self.data_filter.get_filter_summary()
        assert not summary["has_filters"]
        assert summary["search_term"] == ""
        assert len(summary["filters"]) == 0

        # With filters
        self.data_filter.set_search_term("test")
        self.data_filter.set_date_range_filter("2024-01-01", "2024-12-31")

        summary = self.data_filter.get_filter_summary()
        assert summary["has_filters"]
        assert summary["search_term"] == "test"
        assert "date_range" in summary["filters"]

    def test_no_filters_returns_original_data(self):
        """Test that no filters returns original data unchanged."""
        filtered_data = self.data_filter.apply_filters(self.sample_data)
        pd.testing.assert_frame_equal(filtered_data, self.sample_data)

    def test_filter_with_empty_data(self):
        """Test filtering with empty DataFrame."""
        empty_data = pd.DataFrame()
        self.data_filter.set_search_term("test")

        filtered_data = self.data_filter.apply_filters(empty_data)
        assert len(filtered_data) == 0

    def test_invalid_date_handling(self):
        """Test handling of invalid dates in data."""
        invalid_data = self.sample_data.copy()
        invalid_data.loc[0, 'Date'] = 'invalid-date'

        self.data_filter.set_date_range_filter("2024-01-01", "2024-12-31")

        # Should handle invalid dates gracefully
        filtered_data = self.data_filter.apply_filters(invalid_data)
        assert len(filtered_data) >= 0  # Should not crash


class TestQuickFilters:
    """Test cases for QuickFilters class."""

    def setup_method(self):
        """Set up test fixtures."""
        self.data_filter = DataFilter()

    def test_last_week_filter(self):
        """Test last week quick filter."""
        QuickFilters.last_week(self.data_filter)

        assert "date_range" in self.data_filter.active_filters
        date_filter = self.data_filter.active_filters["date_range"]

        # Should have end date as today and start date 7 days ago
        end_date = pd.to_datetime(date_filter["end"])
        start_date = pd.to_datetime(date_filter["start"])

        assert (end_date - start_date).days == 6  # 7 days inclusive

    def test_last_month_filter(self):
        """Test last month quick filter."""
        QuickFilters.last_month(self.data_filter)

        assert "date_range" in self.data_filter.active_filters
        date_filter = self.data_filter.active_filters["date_range"]

        # Should have end date as today and start date 30 days ago
        end_date = pd.to_datetime(date_filter["end"])
        start_date = pd.to_datetime(date_filter["start"])

        assert (end_date - start_date).days == 29  # 30 days inclusive

    def test_this_month_filter(self):
        """Test this month quick filter."""
        QuickFilters.this_month(self.data_filter)

        assert "date_range" in self.data_filter.active_filters
        date_filter = self.data_filter.active_filters["date_range"]

        # Should start from first day of current month
        start_date = pd.to_datetime(date_filter["start"])
        today = pd.to_datetime("today")

        assert start_date.day == 1
        assert start_date.month == today.month
        assert start_date.year == today.year

    def test_high_symptoms_filter(self):
        """Test high symptoms quick filter."""
        pathology_keys = ["pathology1", "pathology2", "pathology3"]

        QuickFilters.high_symptoms(self.data_filter, pathology_keys)

        assert "pathologies" in self.data_filter.active_filters
        pathology_filters = self.data_filter.active_filters["pathologies"]

        # Should set minimum score of 8 for all pathologies
        for key in pathology_keys:
            assert key in pathology_filters
            assert pathology_filters[key]["min"] == 8
            assert pathology_filters[key]["max"] is None


class TestSearchHistory:
    """Test cases for SearchHistory class."""

    def setup_method(self):
        """Set up test fixtures."""
        self.search_history = SearchHistory()

    def test_initialization(self):
        """Test SearchHistory initialization."""
        assert len(self.search_history.get_history()) == 0

    def test_add_search(self):
        """Test adding search terms."""
        self.search_history.add_search("test search")

        history = self.search_history.get_history()
        assert len(history) == 1
        assert "test search" in history

    def test_duplicate_search_handling(self):
        """Test that duplicate searches are handled appropriately."""
        self.search_history.add_search("test search")
        self.search_history.add_search("test search")

        history = self.search_history.get_history()
        # Implementation may vary - could deduplicate or keep most recent
        assert "test search" in history

    def test_empty_search_handling(self):
        """Test handling of empty search terms."""
        self.search_history.add_search("")
        self.search_history.add_search(" ")  # Whitespace only

        history = self.search_history.get_history()
        # Empty/whitespace searches should be ignored or handled appropriately
        assert len([s for s in history if s.strip()]) == 0

    def test_search_history_limit(self):
        """Test search history size limit."""
        # Add many searches
        for i in range(20):
            self.search_history.add_search(f"search {i}")

        history = self.search_history.get_history()
        # Should have reasonable limit (implementation dependent)
        assert len(history) <= 15  # Assuming max 15 items

    def test_get_suggestions(self):
        """Test getting search suggestions."""
        # Add some searches
        searches = ["apple pie", "apple tart", "banana bread", "chocolate cake"]
        for search in searches:
            self.search_history.add_search(search)

        # Test prefix matching
        suggestions = self.search_history.get_suggestions("app")
        apple_suggestions = [s for s in suggestions if "apple" in s.lower()]
        assert len(apple_suggestions) >= 1

    def test_clear_history(self):
        """Test clearing search history."""
        # Add some searches
        self.search_history.add_search("test1")
        self.search_history.add_search("test2")

        # Clear history
        self.search_history.clear_history()

        history = self.search_history.get_history()
        assert len(history) == 0
tests/test_search_filter_ui.py (new file, 335 lines)
@@ -0,0 +1,335 @@
"""Tests for search and filter UI components."""

import pytest
import tkinter as tk
from unittest.mock import MagicMock, patch
from tkinter import ttk

from src.search_filter_ui import SearchFilterWidget
from src.search_filter import DataFilter


class TestSearchFilterWidget:
    """Test cases for SearchFilterWidget class."""

    def setup_method(self):
        """Set up test fixtures."""
        # Create root window for testing
        self.root = tk.Tk()
        self.root.withdraw()  # Hide window during testing

        # Mock managers and dependencies
        self.mock_data_filter = MagicMock(spec=DataFilter)
        self.mock_update_callback = MagicMock()
        self.mock_medicine_manager = MagicMock()
        self.mock_pathology_manager = MagicMock()

        # Configure mock medicine manager
        self.mock_medicine_manager.get_medicine_keys.return_value = ["med1", "med2"]
        mock_medicine1 = MagicMock()
        mock_medicine1.display_name = "Medicine 1"
        mock_medicine2 = MagicMock()
        mock_medicine2.display_name = "Medicine 2"
        self.mock_medicine_manager.get_medicine.side_effect = lambda key: {
            "med1": mock_medicine1,
            "med2": mock_medicine2
        }.get(key)

        # Configure mock pathology manager
        self.mock_pathology_manager.get_pathology_keys.return_value = ["path1", "path2"]
        mock_pathology1 = MagicMock()
        mock_pathology1.display_name = "Pathology 1"
        mock_pathology2 = MagicMock()
        mock_pathology2.display_name = "Pathology 2"
        self.mock_pathology_manager.get_pathology.side_effect = lambda key: {
            "path1": mock_pathology1,
            "path2": mock_pathology2
        }.get(key)

        # Create main frame as parent
        self.parent_frame = ttk.Frame(self.root)
        self.parent_frame.pack(fill="both", expand=True)

        # Create widget
        self.search_widget = SearchFilterWidget(
            parent=self.parent_frame,
            data_filter=self.mock_data_filter,
            update_callback=self.mock_update_callback,
            medicine_manager=self.mock_medicine_manager,
            pathology_manager=self.mock_pathology_manager
        )

    def teardown_method(self):
        """Clean up test fixtures."""
        if hasattr(self, 'search_widget'):
            self.search_widget.hide()
        if hasattr(self, 'root'):
            self.root.destroy()

    def test_initialization(self):
        """Test SearchFilterWidget initialization."""
        assert self.search_widget.parent == self.parent_frame
        assert self.search_widget.data_filter == self.mock_data_filter
        assert self.search_widget.update_callback == self.mock_update_callback
        assert not self.search_widget.is_visible

        # Check that UI variables are initialized
        assert hasattr(self.search_widget, 'search_var')
        assert hasattr(self.search_widget, 'start_date_var')
        assert hasattr(self.search_widget, 'end_date_var')
        assert hasattr(self.search_widget, 'medicine_vars')
        assert hasattr(self.search_widget, 'pathology_min_vars')
        assert hasattr(self.search_widget, 'pathology_max_vars')

    def test_widget_creation(self):
        """Test that widget components are created properly."""
        widget = self.search_widget.get_widget()
        assert isinstance(widget, ttk.LabelFrame)
        assert widget.winfo_exists()

    def test_medicine_variables_creation(self):
        """Test that medicine filter variables are created."""
        assert "med1" in self.search_widget.medicine_vars
        assert "med2" in self.search_widget.medicine_vars

        # Test default values
        assert self.search_widget.medicine_vars["med1"].get() == "any"
        assert self.search_widget.medicine_vars["med2"].get() == "any"

    def test_pathology_variables_creation(self):
        """Test that pathology filter variables are created."""
        assert "path1" in self.search_widget.pathology_min_vars
        assert "path1" in self.search_widget.pathology_max_vars
        assert "path2" in self.search_widget.pathology_min_vars
        assert "path2" in self.search_widget.pathology_max_vars

    def test_show_hide_functionality(self):
        """Test show and hide functionality."""
        # Initially hidden
        assert not self.search_widget.is_visible

        # Show widget
        self.search_widget.show()
        assert self.search_widget.is_visible

        # Hide widget
        self.search_widget.hide()
        assert not self.search_widget.is_visible

    def test_toggle_functionality(self):
        """Test toggle functionality."""
        # Initially hidden, toggle should show
        initial_state = self.search_widget.is_visible
        self.search_widget.toggle()
        assert self.search_widget.is_visible != initial_state

        # Toggle again should hide
        self.search_widget.toggle()
        assert self.search_widget.is_visible == initial_state

    def test_search_change_callback(self):
        """Test search term change callback."""
        # Set search term
        self.search_widget.search_var.set("test search")

        # Should trigger update callback
        self.root.update()  # Process events

        # Verify data filter was updated
        self.mock_data_filter.set_search_term.assert_called_with("test search")
        self.mock_update_callback.assert_called()

    def test_date_change_callback(self):
        """Test date range change callback."""
        # Set date range
        self.search_widget.start_date_var.set("2024-01-01")
        self.search_widget.end_date_var.set("2024-12-31")

        # Process events
        self.root.update()

        # Verify data filter was updated
        self.mock_data_filter.set_date_range_filter.assert_called()

    def test_medicine_change_callback(self):
        """Test medicine filter change callback."""
        # Set medicine filter
        self.search_widget.medicine_vars["med1"].set("taken")

        # Process events
        self.root.update()

        # Verify data filter was updated
        self.mock_data_filter.set_medicine_filter.assert_called()
        self.mock_update_callback.assert_called()

    def test_pathology_change_callback(self):
        """Test pathology filter change callback."""
        # Set pathology range
        self.search_widget.pathology_min_vars["path1"].set("5")
        self.search_widget.pathology_max_vars["path1"].set("9")

        # Process events
        self.root.update()

        # Verify data filter was updated
        self.mock_data_filter.set_pathology_range_filter.assert_called()

    def test_clear_search_functionality(self):
        """Test clear search functionality."""
        # Set search term
        self.search_widget.search_var.set("test search")

        # Clear search
        self.search_widget._clear_search()

        assert self.search_widget.search_var.get() == ""

    def test_clear_all_filters_functionality(self):
        """Test clear all filters functionality."""
        # Set various filters
        self.search_widget.search_var.set("test")
        self.search_widget.start_date_var.set("2024-01-01")
        self.search_widget.medicine_vars["med1"].set("taken")
        self.search_widget.pathology_min_vars["path1"].set("5")

        # Clear all filters
        self.search_widget._clear_all_filters()

        # Verify all are cleared
        assert self.search_widget.search_var.get() == ""
        assert self.search_widget.start_date_var.get() == ""
        assert self.search_widget.medicine_vars["med1"].get() == "any"
        assert self.search_widget.pathology_min_vars["path1"].get() == ""

        # Verify data filter was cleared
        self.mock_data_filter.clear_all_filters.assert_called()

    def test_quick_filter_buttons(self):
        """Test quick filter button functionality."""
        with patch('src.search_filter.QuickFilters') as mock_quick_filters:
            # Test week filter
            self.search_widget._filter_last_week()
            mock_quick_filters.last_week.assert_called_with(self.mock_data_filter)

            # Test month filter
            self.search_widget._filter_last_month()
            mock_quick_filters.last_month.assert_called_with(self.mock_data_filter)

            # Test high symptoms filter
            self.search_widget._filter_high_symptoms()
            mock_quick_filters.high_symptoms.assert_called()

    def test_apply_filters_functionality(self):
        """Test manual apply filters functionality."""
        # Set some filters
        self.search_widget.search_var.set("test")
        self.search_widget.start_date_var.set("2024-01-01")

        # Apply filters manually
        self.search_widget._apply_filters()

        # Should have called various filter methods
        self.mock_data_filter.set_search_term.assert_called()
        self.mock_data_filter.set_date_range_filter.assert_called()

    def test_status_update(self):
        """Test status label update functionality."""
        # Mock filter summary
        mock_summary = {
            "has_filters": True,
            "search_term": "test",
            "filters": {
                "date_range": {"start": "2024-01-01", "end": "2024-12-31"},
                "medicines": {"taken": ["med1"], "not_taken": []},
                "pathologies": {"path1": {"min": 5, "max": 9}}
            }
        }

        self.mock_data_filter.get_filter_summary.return_value = mock_summary

        # Update status
        self.search_widget._update_status()

        # Check that status label was updated
        status_text = self.search_widget.status_label.cget("text")
        assert "Active filters" in status_text

    def test_no_medicines_handling(self):
        """Test handling when no medicines are configured."""
        # Create widget with no medicines
        self.mock_medicine_manager.get_medicine_keys.return_value = []

        widget = SearchFilterWidget(
            parent=self.parent_frame,
            data_filter=self.mock_data_filter,
            update_callback=self.mock_update_callback,
            medicine_manager=self.mock_medicine_manager,
            pathology_manager=self.mock_pathology_manager
        )

        assert len(widget.medicine_vars) == 0

    def test_no_pathologies_handling(self):
        """Test handling when no pathologies are configured."""
        # Create widget with no pathologies
        self.mock_pathology_manager.get_pathology_keys.return_value = []

        widget = SearchFilterWidget(
            parent=self.parent_frame,
            data_filter=self.mock_data_filter,
            update_callback=self.mock_update_callback,
            medicine_manager=self.mock_medicine_manager,
            pathology_manager=self.mock_pathology_manager
        )

        assert len(widget.pathology_min_vars) == 0
        assert len(widget.pathology_max_vars) == 0

    def test_horizontal_layout(self):
        """Test that the horizontal layout is properly implemented."""
        widget = self.search_widget.get_widget()

        # Widget should exist and be properly configured
        assert widget.winfo_exists()

        # The main frame should be a LabelFrame with "Search & Filter" text
        assert isinstance(widget, ttk.LabelFrame)

    def test_grid_configuration(self):
        """Test grid configuration for parent row management."""
        # Mock parent with grid_rowconfigure method
        mock_parent = MagicMock()
        mock_parent.grid_rowconfigure = MagicMock()

        widget = SearchFilterWidget(
            parent=mock_parent,
            data_filter=self.mock_data_filter,
            update_callback=self.mock_update_callback,
            medicine_manager=self.mock_medicine_manager,
            pathology_manager=self.mock_pathology_manager
        )

        # Show widget
        widget.show()

        # Should configure parent grid row
        mock_parent.grid_rowconfigure.assert_called_with(1, minsize=150, weight=0)

        # Hide widget
        widget.hide()

        # Should reset parent grid row
        mock_parent.grid_rowconfigure.assert_called_with(1, minsize=0, weight=0)

    def test_widget_responsiveness(self):
        """Test that widget responds to window resize."""
        # This is a basic test - in a real scenario you'd test actual resize behavior
        widget = self.search_widget.get_widget()

        # Widget should be able to handle pack/grid configuration
        assert widget.winfo_exists()

        # Show and hide should work without errors
        self.search_widget.show()
        self.search_widget.hide()