Add comprehensive tests for error handling, input validation, search filtering, and UI components
Some checks failed
Build and Push Docker Image / build-and-push (push) Has been cancelled
- Implemented unit tests for the ErrorHandler class, covering error handling, frequency tracking, and performance warnings.
- Created integration tests for input validation, error handling, auto-save functionality, and the search/filter systems.
- Developed unit tests for the DataFilter, QuickFilters, and SearchHistory classes to ensure the filtering logic works as expected.
- Added tests for the SearchFilterWidget UI component, verifying initialization, filter functionality, and responsiveness.
- Included edge-case tests for error handling without a UI manager and for handling of None values.
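
As a rough sketch of the kind of DataFilter unit test described above (illustrative only; it assumes the DataFilter API exercised in the diff below, and the actual unit-test module is not shown in this hunk):

import pandas as pd

from search_filter import DataFilter


def test_text_search_returns_matching_rows():
    # Minimal sketch: filter a small frame by note text, using the same calls
    # (set_search_term / apply_filters) as the integration tests below.
    df = pd.DataFrame({
        'date': ['01/01/2024', '01/15/2024'],
        'note': ['First entry', 'Second entry'],
    })
    data_filter = DataFilter()
    data_filter.set_search_term("Second")
    filtered = data_filter.apply_filters(df)
    assert len(filtered) == 1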
@@ -11,16 +11,21 @@ from pathlib import Path
from unittest.mock import Mock, patch, MagicMock
import pytest
import pandas as pd
import time

# Add src to path
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))

from data_manager import DataManager
from export_manager import ExportManager
from init import logger
from input_validator import InputValidator
from error_handler import ErrorHandler
from auto_save import AutoSaveManager
from search_filter import DataFilter, QuickFilters, SearchHistory
from medicine_manager import MedicineManager
from pathology_manager import PathologyManager
from theme_manager import ThemeManager


class TestIntegrationSuite:
@@ -339,3 +344,341 @@ class TestSystemHealthChecks:

        # These should not raise exceptions
        assert True, "Logging system working correctly"


class TestNewFeaturesIntegration:
    """Integration tests for new features added to TheChart."""

    @pytest.fixture(autouse=True)
    def setup_new_features_test(self):
        """Set up test environment for new features."""
        self.temp_dir = tempfile.mkdtemp()
        self.test_csv = os.path.join(self.temp_dir, "test_data.csv")
        self.backup_dir = os.path.join(self.temp_dir, "backups")

        # Create sample data
        sample_data = pd.DataFrame({
            'date': ['01/01/2024', '01/15/2024', '02/01/2024'],
            'note': ['First entry', 'Second entry', 'Third entry'],
            'medicine1': [1, 0, 1],  # 1 = taken, 0 = not taken
            'pathology1': [3, 7, 9]
        })
        sample_data.to_csv(self.test_csv, index=False)

        # Initialize managers
        self.medicine_manager = MedicineManager(logger=logger)
        self.pathology_manager = PathologyManager(logger=logger)

        yield

        # Cleanup
        import shutil
        if os.path.exists(self.temp_dir):
            shutil.rmtree(self.temp_dir)

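    # The sample data above gives three rows: dates 01/01, 01/15 and 02/01/2024,
    # medicine1 taken in two rows and not taken in one, and pathology1 scores of
    # 3, 7 and 9. The row-count assertions in the search/filter tests below rely
    # on these values.
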
    def test_input_validation_integration(self):
        """Test input validation system integration."""
        print("Testing input validation integration...")

        # Test comprehensive validation workflow
        test_cases = [
            # (field_type, value, expected_valid)
            ("date", "01/15/2024", True),
            ("date", "invalid-date", False),
            ("pathology_score", "5", True),
            ("pathology_score", "15", False),
            ("note", "Valid note", True),
            ("note", "A" * 1001, False),  # Too long
            ("filename", "data.csv", True),
            ("filename", "A" * 150, False),  # Too long filename
        ]

        for field_type, value, expected_valid in test_cases:
            if field_type == "date":
                is_valid, _, _ = InputValidator.validate_date(value)
            elif field_type == "pathology_score":
                is_valid, _, _ = InputValidator.validate_pathology_score(value)
            elif field_type == "note":
                is_valid, _, _ = InputValidator.validate_note(value)
            elif field_type == "filename":
                is_valid, _, _ = InputValidator.validate_filename(value)

            assert is_valid == expected_valid, \
                f"Validation failed for {field_type}='{value}': expected {expected_valid}, got {is_valid}"

    def test_error_handling_integration(self):
        """Test error handling system integration."""
        print("Testing error handling integration...")

        # Create a logger for testing
        import logging
        test_logger = logging.getLogger("test")
        mock_ui_manager = MagicMock()
        error_handler = ErrorHandler(logger=test_logger, ui_manager=mock_ui_manager)

        # Test different error types
        error_scenarios = [
            (ValueError("Invalid input"), "Input validation", "Validation failed"),
            (FileNotFoundError("File not found"), "File operation", "File operation failed"),
            (RuntimeError("Unknown error"), "Runtime operation", "Unexpected error")
        ]

        for error, context, user_message in error_scenarios:
            # Test basic error handling
            error_handler.handle_error(error, context, user_message, show_dialog=False)

            # Verify the UI manager was called to update status
            assert mock_ui_manager.update_status.called, f"Status update not called for {context}"

        # Test validation error handling
        error_handler.handle_validation_error("test_field", "Invalid value", "Use a valid value")
        assert mock_ui_manager.update_status.called, "Validation error handling failed"

        # Test file error handling
        error_handler.handle_file_error("read", "/test/file.csv", FileNotFoundError("File missing"))
        assert mock_ui_manager.update_status.called, "File error handling failed"

    def test_auto_save_integration(self):
        """Test auto-save system integration."""
        print("Testing auto-save integration...")

        mock_save_callback = MagicMock()

        auto_save = AutoSaveManager(
            save_callback=mock_save_callback,
            interval_minutes=0.01,  # Very short for testing
        )

        try:
            # Test enabling auto-save
            auto_save.enable_auto_save()
            assert auto_save._auto_save_enabled, "Auto-save should be enabled"

            # Test data modification tracking
            auto_save.mark_data_modified()
            assert auto_save._data_modified, "Data should be marked as modified"

            # Test force save
            auto_save.force_save()
            assert mock_save_callback.called, "Save callback should be called on force save"

            # Test save with modifications
            auto_save.mark_data_modified()
            auto_save.force_save()  # Call force_save again
            assert mock_save_callback.call_count >= 2, "Save should be called when data is modified"

            # Test disabling auto-save
            auto_save.disable_auto_save()
            assert not auto_save._auto_save_enabled, "Auto-save should be disabled"

        finally:
            auto_save.disable_auto_save()

        print("Auto-save integration test passed!")

    def test_search_filter_integration(self):
        """Test search and filter system integration."""
        print("Testing search and filter integration...")

        # Load test data
        test_data = pd.read_csv(self.test_csv)

        data_filter = DataFilter()

        # Test text search
        data_filter.set_search_term("Second")
        filtered_data = data_filter.apply_filters(test_data)
        assert len(filtered_data) == 1, "Text search failed"
        assert "Second entry" in filtered_data['note'].values

        # Test date range filter (two fixture rows fall in January 2024)
        data_filter.clear_all_filters()
        data_filter.set_date_range_filter("01/01/2024", "01/31/2024")
        filtered_data = data_filter.apply_filters(test_data)
        assert len(filtered_data) == 2, "Date range filter failed"

        # Test medicine filter (medicine1 is taken in two rows, not taken in one)
        data_filter.clear_all_filters()
        data_filter.set_medicine_filter("medicine1", True)  # Taken
        filtered_data = data_filter.apply_filters(test_data)
        assert len(filtered_data) == 2, "Medicine filter (taken) failed"

        data_filter.set_medicine_filter("medicine1", False)  # Not taken
        filtered_data = data_filter.apply_filters(test_data)
        assert len(filtered_data) == 1, "Medicine filter (not taken) failed"

        # Test pathology range filter (scores 7 and 9 fall in [5, 10]; 3 does not)
        data_filter.clear_all_filters()
        data_filter.set_pathology_range_filter("pathology1", 5, 10)
        filtered_data = data_filter.apply_filters(test_data)
        assert len(filtered_data) == 2, "Pathology range filter failed"

        # Test combined filters ("entry" matches every note; scores 7 and 9 pass the range)
        data_filter.clear_all_filters()
        data_filter.set_search_term("entry")
        data_filter.set_pathology_range_filter("pathology1", 7, 10)
        filtered_data = data_filter.apply_filters(test_data)
        assert len(filtered_data) == 2, "Combined filters failed"

        # Test quick filters (only the presence of the corresponding filter key is asserted here)
        QuickFilters.last_week(data_filter)
        assert "date_range" in data_filter.active_filters, "Quick filter (last week) failed"

        QuickFilters.last_month(data_filter)
        assert "date_range" in data_filter.active_filters, "Quick filter (last month) failed"

        pathology_keys = self.pathology_manager.get_pathology_keys()
        if pathology_keys:
            QuickFilters.high_symptoms(data_filter, pathology_keys)
            assert "pathologies" in data_filter.active_filters, "Quick filter (high symptoms) failed"

    def test_search_history_integration(self):
        """Test search history functionality."""
        print("Testing search history integration...")

        search_history = SearchHistory()

        # Test adding searches
        test_searches = ["symptom search", "medication query", "date range"]
        for search in test_searches:
            search_history.add_search(search)

        history = search_history.get_history()
        assert len(history) >= len(test_searches), "Search history not recording properly"

        # Test search suggestions ("medication query" was added above, so at least
        # one suggestion should contain "med")
        suggestions = search_history.get_suggestions("med")
        medication_suggestions = [s for s in suggestions if "med" in s.lower()]
        assert len(medication_suggestions) > 0, "Search suggestions not working"

    def test_complete_workflow_integration(self):
        """Test complete workflow with all new features."""
        print("Testing complete workflow integration...")

        # Initialize all systems
        mock_save_callback = MagicMock()
        auto_save = AutoSaveManager(
            save_callback=mock_save_callback,
            interval_minutes=5
        )
        data_filter = DataFilter()

        try:
            # Step 1: Enable auto-save
            auto_save.enable_auto_save()

            # Step 2: Validate new data entry
            new_date = "01/15/2024"
            new_note = "Workflow test entry"

            date_valid, date_msg, _ = InputValidator.validate_date(new_date)
            note_valid, note_msg, _ = InputValidator.validate_note(new_note)

            assert date_valid, f"Date validation failed: {date_msg}"
            assert note_valid, f"Note validation failed: {note_msg}"

            score_valid, score_msg, _ = InputValidator.validate_pathology_score("6")
            assert score_valid, f"Score validation failed: {score_msg}"

            # Step 3: Add validated data to file
            original_data = pd.read_csv(self.test_csv)
            new_row = pd.DataFrame({
                'date': [new_date],
                'note': [new_note],
                'medicine1': [0],
                'pathology1': [6]
            })
            updated_data = pd.concat([original_data, new_row], ignore_index=True)
            updated_data.to_csv(self.test_csv, index=False)

            # Step 4: Mark data as modified for auto-save
            auto_save.mark_data_modified()
            auto_save.force_save()
            assert mock_save_callback.called, "Auto-save should trigger save callback"

            # Step 5: Test filtering on updated data
            data_filter.set_search_term("Workflow")
            filtered_data = data_filter.apply_filters(updated_data)
            assert len(filtered_data) == 1, "Search filter failed on updated data"
            assert any("Workflow" in note for note in filtered_data['note'].values)

            # Step 6: Test date range filter
            data_filter.clear_all_filters()
            data_filter.set_date_range_filter("01/14/2024", "01/16/2024")  # Include both entries on 01/15
            filtered_data = data_filter.apply_filters(updated_data)
            assert len(filtered_data) == 2, "Date filter failed on new entry"

            # Step 7: Test error handling with invalid operation
            try:
                # Simulate file operation error
                raise FileNotFoundError("Simulated file error")
            except FileNotFoundError as e:
                import logging
                test_logger = logging.getLogger("test")
                mock_ui_manager = MagicMock()
                error_handler = ErrorHandler(logger=test_logger, ui_manager=mock_ui_manager)
                error_handler.handle_error(e, "Test error handling", "Simulated error", show_dialog=False)

            # Verify error was handled
            assert mock_ui_manager.update_status.called, "Error handling should update status"

            # Step 8: Verify auto-save functionality
            assert auto_save._auto_save_enabled, "Auto-save should be enabled"
            auto_save.disable_auto_save()
            assert not auto_save._auto_save_enabled, "Auto-save should be disabled"

            print("Complete workflow integration test passed!")

        finally:
            auto_save.disable_auto_save()

    def test_performance_under_load(self):
        """Test system performance with larger datasets."""
        print("Testing performance under load...")

        # Create larger dataset
        large_data = []
        for i in range(100):
            large_data.append({
                'date': f"01/{(i % 28) + 1:02d}/2024",
                'note': f"Entry number {i}",
                'medicine1': 1 if i % 2 == 0 else 0,
                'pathology1': (i % 10) + 1
            })

        large_df = pd.DataFrame(large_data)
        large_csv = os.path.join(self.temp_dir, "large_data.csv")
        large_df.to_csv(large_csv, index=False)

        # Test filtering performance
        data_filter = DataFilter()

        start_time = time.time()
        data_filter.set_search_term("Entry")
        filtered_data = data_filter.apply_filters(large_df)
        search_time = time.time() - start_time

        assert len(filtered_data) == 100, "Search filter failed on large dataset"
        assert search_time < 1.0, f"Search took too long: {search_time:.2f}s"

        # Test auto-save performance
        mock_save_callback = MagicMock()
        auto_save = AutoSaveManager(
            save_callback=mock_save_callback,
            interval_minutes=5
        )

        try:
            start_time = time.time()
            auto_save.enable_auto_save()
            auto_save.mark_data_modified()
            auto_save.force_save()
            save_time = time.time() - start_time

            assert mock_save_callback.called, "Save callback should be called"
            assert save_time < 2.0, f"Save took too long: {save_time:.2f}s"

        finally:
            auto_save.disable_auto_save()

        print(f"Performance test completed: Search={search_time:.3f}s, Save={save_time:.3f}s")