#!/usr/bin/env python3
"""
Comprehensive Test Suite for IIT Mathematical Foundation

This module provides extensive validation for all IIT mathematical operations,
including edge cases, performance tests, and mathematical correctness
verification.

Author: IIT Implementation Team
Version: 1.0
"""

import math
import itertools
import time
import unittest
import json
import datetime
from typing import Dict, List, Tuple, Optional, Set, Any
import collections

from iit_core import (
    IITCalculator, SystemState, Concept, CauseEffectStructure,
    ProbabilityDistribution, TransitionProbabilityMatrix, Partition
)
from phi_algorithms import OptimizedPhiCalculator, PhiAnalyzer
from causal_power import CausalPowerAnalyzer, PerturbationResult, Intervention
from concept_structures import ConceptAnalyzer, RepertoireCalculator, RepertoireProfile


class TestProbabilityDistributions(unittest.TestCase):
    """Test probability distribution calculations."""

    def setUp(self):
        """Setup test fixtures."""
        self.uniform_dist = ProbabilityDistribution({
            (0, 0): 0.25,
            (0, 1): 0.25,
            (1, 0): 0.25,
            (1, 1): 0.25
        })
        self.deterministic_dist = ProbabilityDistribution({
            (0, 0): 1.0,
            (0, 1): 0.0,
            (1, 0): 0.0,
            (1, 1): 0.0
        })
        self.skewed_dist = ProbabilityDistribution({
            (0, 0): 0.7,
            (0, 1): 0.2,
            (1, 0): 0.08,
            (1, 1): 0.02
        })

    def test_entropy_calculations(self):
        """Test entropy calculations for different distributions."""
        # Uniform distribution should have maximum entropy
        uniform_entropy = self.uniform_dist.entropy()
        expected_max = math.log2(4)  # log2(4) = 2
        self.assertAlmostEqual(uniform_entropy, expected_max, places=5)

        # Deterministic distribution should have zero entropy
        det_entropy = self.deterministic_dist.entropy()
        self.assertAlmostEqual(det_entropy, 0.0, places=5)

        # Skewed distribution should have intermediate entropy
        skewed_entropy = self.skewed_dist.entropy()
        self.assertGreater(skewed_entropy, 0.0)
        self.assertLess(skewed_entropy, expected_max)

    def test_kl_divergence_properties(self):
        """Test KL divergence mathematical properties."""
        # KL divergence with itself should be zero
        kl_self = self.uniform_dist.kullback_leibler_divergence(self.uniform_dist)
        self.assertAlmostEqual(kl_self, 0.0, places=5)

        # KL divergence should be non-negative
        kl_uniform_skewed = self.uniform_dist.kullback_leibler_divergence(self.skewed_dist)
        self.assertGreaterEqual(kl_uniform_skewed, 0.0)

        # KL divergence is not symmetric
        kl_skewed_uniform = self.skewed_dist.kullback_leibler_divergence(self.uniform_dist)
        self.assertNotAlmostEqual(kl_uniform_skewed, kl_skewed_uniform, places=5)

    def test_variation_distance(self):
        """Test L1 variation distance."""
        # Distance with itself should be zero
        dist_self = self.uniform_dist.variation_distance(self.uniform_dist)
        self.assertAlmostEqual(dist_self, 0.0, places=5)

        # Distance should be symmetric
        dist1 = self.uniform_dist.variation_distance(self.skewed_dist)
        dist2 = self.skewed_dist.variation_distance(self.uniform_dist)
        self.assertAlmostEqual(dist1, dist2, places=5)

        # Distance should be <= 1
        self.assertLessEqual(dist1, 1.0)


class TestPhiCalculations(unittest.TestCase):
    """Test Φ calculation algorithms."""

    def setUp(self):
        """Setup test fixtures."""
        self.calculator = OptimizedPhiCalculator(num_elements=3)
        self.setup_deterministic_system()

    def setup_deterministic_system(self):
        """Setup a deterministic 3-element system."""
        states = []
        for bits in itertools.product([0, 1], repeat=3):
            states.append(SystemState(bits, 1.0 / 8.0))

        # Create more complex transitions to ensure concept generation
        for i, from_state in enumerate(states):
            # Primary transition
            to_state = states[(i + 1) % len(states)]
            self.calculator.tpm.add_transition(from_state, to_state, 0.8)

            # Add some specific interesting transitions
            for j, other_state in enumerate(states):
                if i != j:
                    if sum(from_state.elements) == sum(other_state.elements):
                        # Same number of 1s - add connection
                        self.calculator.tpm.add_transition(from_state, other_state, 0.2)

    def test_phi_basic_properties(self):
        """Test basic Φ calculation properties."""
        test_state = SystemState((1, 0, 1), 1.0)
        mechanism = {0, 1}
        purview = {1, 2}

        # Φ should be non-negative
        phi = self.calculator.compute_phi_fast(mechanism, purview, test_state)
        self.assertGreaterEqual(phi, 0.0)

        # Φ should be consistent across methods (approximately)
        phi_heuristic = self.calculator.compute_phi_fast(mechanism, purview, test_state, 'heuristic')
        phi_beam = self.calculator.compute_phi_fast(mechanism, purview, test_state, 'beam')

        # Methods should be roughly in the same ballpark
        self.assertLess(abs(phi_heuristic - phi_beam), 2.0)

    def test_phi_edge_cases(self):
        """Test edge cases in Φ calculations."""
        test_state = SystemState((1, 1, 1), 1.0)

        # Empty mechanism or purview should give phi = 0
        phi_empty_mechanism = self.calculator.compute_phi_fast(set(), {0, 1}, test_state)
        phi_empty_purview = self.calculator.compute_phi_fast({0, 1}, set(), test_state)

        self.assertAlmostEqual(phi_empty_mechanism, 0.0, places=5)
        self.assertAlmostEqual(phi_empty_purview, 0.0, places=5)

        # Single-element mechanisms should work
        phi_single = self.calculator.compute_phi_fast({0}, {1}, test_state)
        self.assertGreaterEqual(phi_single, 0.0)

    def test_concept_computation(self):
        """Test concept computation and validation."""
        test_state = SystemState((1, 0, 1), 1.0)
        concepts = self.calculator.compute_concepts(test_state)

        # Should find some concepts
        self.assertGreater(len(concepts.concepts), 0)

        # All concepts should have positive phi
        for concept in concepts.concepts:
            self.assertGreater(concept.phi, 0.0)
            self.assertIsNotNone(concept.mechanism)
            self.assertIsNotNone(concept.purview)

        # Total phi should be sum of individual concept phis
        expected_total = sum(c.phi for c in concepts.concepts)
        self.assertAlmostEqual(concepts.total_phi, expected_total, places=5)

        # Normalized phi should be between 0 and 1
        self.assertGreaterEqual(concepts.normalized_phi, 0.0)
        self.assertLessEqual(concepts.normalized_phi, 1.0)


class TestCausalPower(unittest.TestCase):
    """Test causal power calculations."""

    def setUp(self):
        """Setup test fixtures."""
        self.calculator = IITCalculator(num_elements=3)
        self.setup_test_system()
        self.analyzer = CausalPowerAnalyzer(self.calculator)

    def setup_test_system(self):
        """Setup test system with specific causal structure."""
        states = []
        for bits in itertools.product([0, 1], repeat=3):
            prob = 1.0 / 8.0
            states.append(SystemState(bits, prob))

        # Setup causal chain: 0 -> 1 -> 2
        for i, from_state in enumerate(states):
            for j, to_state in enumerate(states):
                if from_state.elements[0] == to_state.elements[1] and \
                   from_state.elements[1] == to_state.elements[2]:
                    self.calculator.tpm.add_transition(from_state, to_state, 0.8)
                elif i == j:
                    self.calculator.tpm.add_transition(from_state, to_state, 0.2)

    def test_causal_power_matrix(self):
        """Test causal power matrix computation."""
        test_state = SystemState((1, 0, 1), 1.0)
        causal_matrix = self.analyzer.compute_causal_power_matrix(test_state)

        # Should have entries for all non-diagonal element pairs
        expected_pairs = [(i, j) for i in range(3) for j in range(3) if i != j]
        self.assertEqual(len(causal_matrix), len(expected_pairs))

        # All causal power values should be non-negative
        for (source, target), power in causal_matrix.items():
            self.assertGreaterEqual(power, 0.0)

    def test_perturbation_effects(self):
        """Test perturbation analysis."""
        test_state = SystemState((1, 0, 1), 1.0)
        interventions = [
            Intervention({0}, 'clamp', 1.0, 5),
            Intervention({1, 2}, 'noise', 0.5, 3)
        ]

        results = self.analyzer.compute_intervention_effects(test_state, interventions)

        # Should have results for all interventions
        self.assertEqual(len(results), len(interventions))

        for result in results:
            self.assertIsInstance(result, PerturbationResult)
            self.assertGreaterEqual(result.causal_power, 0.0)
            self.assertGreaterEqual(result.stability_metric, 0.0)
            self.assertLessEqual(result.stability_metric, 1.0)

    def test_resilience_analysis(self):
        """Test system resilience analysis."""
        test_state = SystemState((1, 1, 1), 1.0)
        resilience = self.analyzer.analyze_system_resilience(test_state)

        # Should have scores for all perturbation types
        for pert_type in ['clamp', 'noise', 'lesion']:
            self.assertIn(pert_type, resilience)
            self.assertGreaterEqual(resilience[pert_type], 0.0)
            self.assertLessEqual(resilience[pert_type], 1.0)


class TestConceptStructures(unittest.TestCase):
    """Test concept structure analysis."""

    def setUp(self):
        """Setup test fixtures."""
        self.calculator = IITCalculator(num_elements=3)
        self.setup_test_system()
        self.repertoire_calc = RepertoireCalculator(self.calculator.tpm)
        self.analyzer = ConceptAnalyzer(self.calculator, self.repertoire_calc)

    def setup_test_system(self):
        """Setup test system for concept analysis."""
        states = []
        for bits in itertools.product([0, 1], repeat=3):
            prob = 1.0 / 8.0
            states.append(SystemState(bits, prob))

        # Add cyclic transitions with some complexity
        for i, from_state in enumerate(states):
            to_state = states[(i + 1) % len(states)]
            self.calculator.tpm.add_transition(from_state, to_state, 0.7)

            # Add some stochastic transitions
            for j, other_state in enumerate(states):
                if i != j and j % 2 == 0:
                    self.calculator.tpm.add_transition(from_state, other_state, 0.3)

    def test_repertoire_profiles(self):
        """Test repertoire profile computation."""
        test_state = SystemState((1, 0, 1), 1.0)
        mechanism = {0, 1}
        purview = {1, 2}

        # Test different repertoire computation methods
        methods = ['standard', 'bayesian', 'maximum_likelihood', 'entropy_minimization']

        for method in methods:
            cause_profile = self.repertoire_calc.compute_cause_repertoire(
                test_state, mechanism, purview, method
            )
            effect_profile = self.repertoire_calc.compute_effect_repertoire(
                test_state, mechanism, purview, method
            )

            self.assertIsInstance(cause_profile, RepertoireProfile)
            self.assertIsInstance(effect_profile, RepertoireProfile)

            # Check basic properties
            self.assertGreaterEqual(cause_profile.entropy, 0.0)
            self.assertGreaterEqual(effect_profile.entropy, 0.0)
            self.assertGreaterEqual(cause_profile.specificity, 0.0)
            self.assertLessEqual(cause_profile.specificity, 1.0)

    def test_concept_clustering(self):
        """Test concept clustering."""
        test_state = SystemState((1, 0, 1), 1.0)
        concepts = self.calculator.compute_concepts(test_state)

        if concepts.concepts:
            clusters = self.analyzer.cluster_concepts(concepts.concepts, similarity_threshold=0.5)

            # Should have at least one cluster
            self.assertGreater(len(clusters), 0)

            # All concepts should be assigned to clusters
            clustered_concepts = set()
            for cluster in clusters:
                clustered_concepts.update(cluster.concepts)
            self.assertEqual(len(clustered_concepts), len(concepts.concepts))

            # Check cluster properties
            for cluster in clusters:
                self.assertGreaterEqual(cluster.avg_phi, 0.0)
                self.assertGreater(len(cluster.concepts), 0)

    def test_concept_hierarchy(self):
        """Test concept hierarchy building."""
        test_state = SystemState((1, 0, 1), 1.0)
        concepts = self.calculator.compute_concepts(test_state)

        if concepts.concepts:
            structure = self.analyzer.build_concept_hierarchy(concepts.concepts)

            self.assertIsInstance(structure, type(concepts))
            self.assertGreater(len(structure), 0)
            self.assertGreaterEqual(structure.structural_complexity, 0.0)


class TestMathematicalValidation(unittest.TestCase):
    """Test mathematical correctness and invariants."""

    def test_information_theory_consistency(self):
        """Test information theory consistency across calculations."""
        # Create a simple test distribution
        dist = ProbabilityDistribution({
            (0,): 0.3,
            (1,): 0.7
        })

        # Entropy should be consistent
        entropy = dist.entropy()
        self.assertGreater(entropy, 0.0)
        self.assertLess(entropy, math.log2(2))

        # KL divergence properties
        uniform_dist = ProbabilityDistribution({(0,): 0.5, (1,): 0.5})
        kl_div = dist.kullback_leibler_divergence(uniform_dist)
        self.assertGreaterEqual(kl_div, 0.0)

        # Variation distance bounds
        var_dist = dist.variation_distance(uniform_dist)
        self.assertGreaterEqual(var_dist, 0.0)
        self.assertLessEqual(var_dist, 1.0)

    def test_partition_consistency(self):
        """Test partition mathematical consistency."""
        # Test partition creation (construction would raise on invalid input)
        partition1 = Partition({0}, {1, 2}, {0}, {1, 2})
        partition2 = Partition({1, 2}, {0}, {1, 2}, {0})

        # Partitions should be valid
        self.assertEqual(partition1.mechanism_1, {0})
        self.assertEqual(partition1.mechanism_2, {1, 2})
        self.assertEqual(partition1.purview_1, {0})
        self.assertEqual(partition1.purview_2, {1, 2})

    def test_system_state_consistency(self):
        """Test system state mathematical properties."""
        state1 = SystemState((0, 1, 0), 0.5)
        state2 = SystemState((1, 0, 1), 0.5)

        # Hash should be based on elements
        hash1 = hash(state1)
        hash2 = hash(state2)
        self.assertNotEqual(hash1, hash2)

        # Length should be number of elements
        self.assertEqual(len(state1), 3)
        self.assertEqual(len(state2), 3)


class PerformanceBenchmarks(unittest.TestCase):
    """Performance benchmarking tests."""

    def test_phi_computation_performance(self):
        """Benchmark Φ computation performance."""
        sizes = [2, 3, 4]  # Keep small for practical testing
        results = {}

        for size in sizes:
            calculator = OptimizedPhiCalculator(num_elements=size)

            # Setup test system
            states = []
            for bits in itertools.product([0, 1], repeat=size):
                states.append(SystemState(bits, 1.0 / (2 ** size)))

            for i, from_state in enumerate(states):
                to_state = states[(i + 1) % len(states)]
                calculator.tpm.add_transition(from_state, to_state, 1.0)

            test_state = SystemState(tuple([1] * size), 1.0)
            mechanism = {0, 1} if size >= 2 else {0}
            purview = {1, 2} if size >= 3 else {1} if size >= 2 else {0}

            # Time the computation with a monotonic high-resolution clock;
            # datetime.now() is wall-clock and too coarse for benchmarking.
            start_time = time.perf_counter()
            phi = calculator.compute_phi_fast(mechanism, purview, test_state, 'heuristic')
            end_time = time.perf_counter()

            computation_time = end_time - start_time
            results[size] = computation_time

            # Should complete in reasonable time
            self.assertLess(computation_time, 5.0)  # 5 seconds max

        # Performance should scale roughly exponentially
        if len(results) >= 2:
            sizes_list = sorted(results.keys())
            time_2 = results[sizes_list[0]]
            time_3 = results[sizes_list[1]]

            # Guard against ZeroDivisionError when the smaller run is
            # faster than the clock's resolution.
            if time_2 > 0:
                # Should not grow too fast (within 10x per element)
                self.assertLess(time_3 / time_2, 10.0)


def run_comprehensive_validation():
    """Run the complete validation suite."""
    print("Running Comprehensive IIT Mathematical Validation")
    print("=" * 60)

    # Create test suite
    test_suite = unittest.TestSuite()

    # Add test cases
    test_classes = [
        TestProbabilityDistributions,
        TestPhiCalculations,
        TestCausalPower,
        TestConceptStructures,
        TestMathematicalValidation,
        PerformanceBenchmarks
    ]

    # One loader instance is enough; hoisted out of the loop.
    loader = unittest.TestLoader()
    for test_class in test_classes:
        tests = loader.loadTestsFromTestCase(test_class)
        test_suite.addTests(tests)

    # Run tests
    runner = unittest.TextTestRunner(verbosity=2)
    result = runner.run(test_suite)

    # Generate report
    report = {
        'validation_date': datetime.datetime.now().isoformat(),
        'tests_run': result.testsRun,
        'failures': len(result.failures),
        'errors': len(result.errors),
        'success_rate': (result.testsRun - len(result.failures) - len(result.errors)) / result.testsRun if result.testsRun > 0 else 0,
        'validation_passed': len(result.failures) == 0 and len(result.errors) == 0
    }

    if result.failures:
        report['failure_details'] = []
        for test, traceback in result.failures:
            report['failure_details'].append({
                'test': str(test),
                'error': traceback
            })

    if result.errors:
        report['error_details'] = []
        for test, traceback in result.errors:
            report['error_details'].append({
                'test': str(test),
                'error': traceback
            })

    return report


def generate_complexity_analysis():
    """Generate algorithmic complexity analysis."""
    analysis = {
        'phi_algorithms': {
            'heuristic_search': 'O(n^3) where n is system size',
            'beam_search': 'O(k * n^3) where k is beam width',
            'exhaustive_search': 'O(2^(n^2)) exponential complexity'
        },
        'causal_power_analysis': {
            'pairwise_causal_power': 'O(n^2 * m) where m is state space size',
            'perturbation_analysis': 'O(p * n * m) where p is number of perturbations',
            'resilience_analysis': 'O(p * i * m) where i is intensity levels'
        },
        'concept_structure_analysis': {
            'concept_generation': 'O(2^n * 2^(2n)) exponential in system size',
            'clustering': 'O(k^2) where k is number of concepts',
            'hierarchy_building': 'O(k^2) for k concepts',
            'repertoire_computation': 'O(m) where m is transition count'
        },
        'scalability_limits': {
            'practical_system_size': '4-5 elements for exhaustive analysis',
            'heuristic_methods': 'Up to 8-10 elements',
            'approximate_methods': 'Potentially larger systems with further optimization'
        },
        'memory_requirements': {
            'transition_matrix': 'O(2^n * 2^n) for full TPM',
            'concept_storage': 'O(k * n) where k is number of concepts',
            'partition_cache': 'O(p * n) where p is partition count'
        }
    }

    return analysis


if __name__ == "__main__":
    # Run validation
    validation_report = run_comprehensive_validation()

    print("\nValidation Summary:")
    print(f"Tests run: {validation_report['tests_run']}")
    print(f"Failures: {validation_report['failures']}")
    print(f"Errors: {validation_report['errors']}")
    print(f"Success rate: {validation_report['success_rate']:.2%}")
    print(f"Validation passed: {validation_report['validation_passed']}")

    # Generate complexity analysis
    complexity = generate_complexity_analysis()
    print("\nAlgorithmic Complexity Analysis:")
    for category, details in complexity.items():
        print(f"\n{category.upper()}:")
        if isinstance(details, dict):
            for method, complexity_desc in details.items():
                print(f"  {method}: {complexity_desc}")
        else:
            print(f"  {details}")

    # Save reports
    with open('validation_report.json', 'w') as f:
        json.dump(validation_report, f, indent=2)

    with open('complexity_analysis.json', 'w') as f:
        json.dump(complexity, f, indent=2)

    print("\nReports saved to validation_report.json and complexity_analysis.json")

    if validation_report['validation_passed']:
        print("✅ All mathematical validations passed!")
    else:
        print("❌ Some validations failed. Check detailed report.")