# OP_CAT-IPFS Integration Troubleshooting Guide & Developer Onboarding ## Overview This comprehensive troubleshooting guide and developer onboarding materials provide solutions for common issues, debugging techniques, and learning resources for OP_CAT-IPFS integration. --- ## Troubleshooting Guide ### Common Issues and Solutions #### Issue 1: Script Size Exceeded ``` Error: Script size exceeds 520 bytes limit ``` **Symptoms:** - Script validation fails - Bitcoin transaction rejected - OP_CAT operations not executing **Causes:** - Too many operations in script - Large content identifiers - Inefficient operation sequence **Solutions:** ```python # Solution 1: Optimize script size def optimize_script_size(script: Dict) -> Dict: """Optimize script to reduce size.""" operations = script["operations"] # Remove redundant operations optimized_ops = [] for i, op in enumerate(operations): # Skip consecutive PUSH operations that can be combined if (i > 0 and op["op"].startswith("OP_PUSH") and operations[i-1]["op"].startswith("OP_PUSH")): # Combine with previous operation continue optimized_ops.append(op) script["operations"] = optimized_ops return script # Solution 2: Use content chunking def handle_large_content(content: bytes) -> Dict: """Handle large content through chunking.""" if len(content) > 1000000: # 1MB threshold # Use chunked approach chunk_size = 1024 * 1024 # 1MB chunks chunks = [content[i:i+chunk_size] for i in range(0, len(content), chunk_size)] return create_chunked_script(chunks) else: # Use basic approach return create_basic_script(content) # Solution 3: Minimize CID length def create_minimal_cid(content: bytes) -> str: """Create minimal CID for space efficiency.""" # Use shorter hash if security allows content_hash = hashlib.sha256(content).digest()[:16] # Truncate to 16 bytes cid_base32 = base64.b32encode(content_hash).decode('utf-8').lower() return f"bafy{cid_base32[:20]}" # Shorter CID ``` **Prevention:** - Always validate script size before deployment 
- Use chunking for large content - Optimize operation sequences --- #### Issue 2: Stack Depth Limit Exceeded ``` Error: Stack depth exceeded during execution ``` **Symptoms:** - Script execution fails - Stack overflow errors - Transaction validation failure **Causes:** - Too many PUSH operations without consumption - Inefficient concatenation patterns - Nested conditional structures **Solutions:** ```python # Solution 1: Monitor stack depth def monitor_stack_depth(operations: List[Dict]) -> Dict: """Monitor and analyze stack depth.""" stack_depth = 0 max_depth = 0 depth_history = [] for i, op in enumerate(operations): op_name = op["op"] # Track stack changes (OP_HASH256 pops one and pushes one, so it is net zero) if op_name.startswith("OP_PUSH"): stack_depth += 1 elif op_name in ["OP_CAT", "OP_EQUAL"]: stack_depth = max(0, stack_depth - 1) elif op_name == "OP_EQUALVERIFY": stack_depth = max(0, stack_depth - 2) elif op_name in ["OP_1", "OP_0"]: stack_depth += 1 elif op_name in ["OP_DROP"]: stack_depth = max(0, stack_depth - 1) max_depth = max(max_depth, stack_depth) depth_history.append({ "operation": i, "op": op_name, "stack_depth": stack_depth }) return { "max_depth": max_depth, "final_depth": stack_depth, "depth_history": depth_history, "exceeds_limit": max_depth > 10 } # Solution 2: Optimize stack usage def optimize_stack_usage(operations: List[Dict]) -> List[Dict]: """Optimize operations for better stack usage.""" optimized = [] # Group consecutive PUSH operations push_group = [] for op in operations: if op["op"].startswith("OP_PUSH"): push_group.append(op) else: # Process accumulated PUSH operations if push_group: optimized.extend(push_group) push_group = [] optimized.append(op) # Add remaining PUSH operations if push_group: optimized.extend(push_group) return optimized # Solution 3: Use efficient concatenation patterns def create_efficient_concatenation_script(items: List[str]) -> Dict: """Create script with efficient concatenation.""" operations = [] # Use tree-based concatenation instead of linear def create_tree_concatenation(items, start=0, 
end=None): if end is None: end = len(items) if start >= end: return [] if end - start == 1: return [{"op": f"OP_PUSHBYTES_{len(items[start])}", "data": items[start]}] mid = (start + end) // 2 left_ops = create_tree_concatenation(items, start, mid) right_ops = create_tree_concatenation(items, mid, end) return left_ops + right_ops + [{"op": "OP_CAT"}] operations = create_tree_concatenation(items) return { "version": "OP_CAT_EFFICIENT_v1.0", "operations": operations, "metadata": { "item_count": len(items), "concatenation_pattern": "tree_based" } } ``` --- #### Issue 3: Invalid IPFS CID Format ``` Error: Invalid IPFS CID format ``` **Symptoms:** - CID validation fails - Content addressing errors - Integration failures **Causes:** - Incorrect CID prefix - Invalid base32 encoding - Wrong CID length **Solutions:** ```python # Solution 1: Comprehensive CID validation def validate_ipfs_cid_comprehensive(cid: str) -> Dict: """Comprehensive CID validation with detailed feedback.""" result = { "valid": False, "errors": [], "warnings": [], "suggestions": [] } # Basic format checks if not isinstance(cid, str): result["errors"].append("CID must be a string") return result if len(cid) < 10: result["errors"].append("CID too short") return result # Prefix validation valid_prefixes = ["bafy", "bafk", "bafybei", "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi"] prefix_match = any(cid.startswith(prefix) for prefix in valid_prefixes) if not prefix_match: result["errors"].append("Invalid CID prefix") result["suggestions"].append("Use valid prefixes: bafy, bafk, bafybei") return result # Extract and validate base32 part if cid.startswith("bafy"): base32_part = cid[4:] elif cid.startswith("bafk"): base32_part = cid[4:] else: base32_part = cid[7:] # Longer prefixes # Character validation valid_chars = set("abcdefghijklmnopqrstuvwxyz234567") invalid_chars = set(base32_part) - valid_chars if invalid_chars: result["errors"].append(f"Invalid characters: {', '.join(invalid_chars)}") 
result["suggestions"].append("Use only base32 characters: a-z, 2-7") # Length validation expected_lengths = [44, 49, 59] # Common CID lengths if len(base32_part) not in expected_lengths: result["warnings"].append(f"Unusual CID length: {len(base32_part)}") result["suggestions"].append(f"Expected lengths: {expected_lengths}") # Checksum validation (simplified) if not _verify_cid_checksum(cid): result["warnings"].append("CID checksum verification failed") result["suggestions"].append("Regenerate CID with proper checksum") result["valid"] = len(result["errors"]) == 0 return result # Solution 2: CID regeneration helper def regenerate_cid(content: bytes, cid_type: str = "default") -> str: """Regenerate CID with proper format.""" if cid_type == "default": # Standard bafy CID prefix = b'\x12' + b'\x20' # SHA2-256 content_hash = hashlib.sha256(content).digest() multihash = prefix + content_hash cid_prefix = b'\x55' # Raw codec elif cid_type == "chunk": # Chunk CID with bafk prefix prefix = b'\x56' + b'\x20' # Chunk codec content_hash = hashlib.sha256(content).digest() multihash = prefix + content_hash cid_prefix = b'\x55' else: raise ValueError(f"Unknown CID type: {cid_type}") cid_input = cid_prefix + multihash cid_hash = hashlib.sha256(cid_input).digest() cid_base32 = base64.b32encode(cid_hash).decode('utf-8').lower() if cid_type == "chunk": return f"bafk{cid_base32[:44]}" else: return f"bafy{cid_base32[:44]}" # Solution 3: CID format converter def convert_cid_format(old_cid: str, target_format: str) -> str: """Convert CID between different formats.""" # Extract the hash part if old_cid.startswith("bafy"): hash_part = old_cid[4:] elif old_cid.startswith("bafk"): hash_part = old_cid[4:] else: raise ValueError("Unsupported CID format") # Convert to target format if target_format == "bafy": return f"bafy{hash_part[:44]}" elif target_format == "bafk": return f"bafk{hash_part[:44]}" else: raise ValueError(f"Unknown target format: {target_format}") ``` --- #### Issue 4: Content 
Integrity Verification Failed ``` Error: Content integrity verification failed ``` **Symptoms:** - Hash mismatches - Content corruption detected - Verification failures **Causes:** - Content modification after hashing - Hash calculation errors - Transmission corruption **Solutions:** ```python # Solution 1: Multi-layer integrity verification def verify_content_integrity_comprehensive(content: bytes, expected_hashes: Dict) -> Dict: """Comprehensive content integrity verification.""" actual_hashes = {} verification_results = {} # Calculate multiple hashes hash_algorithms = { "sha256": lambda data: hashlib.sha256(data).hexdigest(), "sha512": lambda data: hashlib.sha512(data).hexdigest(), "blake2b": lambda data: hashlib.blake2b(data, digest_size=32).hexdigest(), "ripemd160": lambda data: hashlib.new('ripemd160', data).hexdigest() } for algorithm, hash_func in hash_algorithms.items(): actual_hashes[algorithm] = hash_func(content) if algorithm in expected_hashes: expected = expected_hashes[algorithm] actual = actual_hashes[algorithm] verification_results[algorithm] = { "valid": actual == expected, "expected": expected, "actual": actual, "match": actual == expected } # Overall verification all_valid = all(result["valid"] for result in verification_results.values()) return { "overall_valid": all_valid, "actual_hashes": actual_hashes, "verification_results": verification_results, "verified_algorithms": list(verification_results.keys()) } # Solution 2: Content repair suggestions def suggest_content_repair(content: bytes, expected_hash: str, algorithm: str = "sha256") -> Dict: """Suggest repairs for content integrity issues.""" actual_hash = hashlib.new(algorithm, content).hexdigest() suggestions = [] # Check for common corruption patterns if len(content) == 0: suggestions.append("Content is empty - possible transmission failure") elif actual_hash != expected_hash: # Calculate Hamming distance (simplified) diff_count = sum(1 for a, b in zip(actual_hash, expected_hash) if a != b) if 
diff_count <= 4: suggestions.append("Minor corruption detected - try retransmission") elif diff_count <= 16: suggestions.append("Moderate corruption - check transmission channel") else: suggestions.append("Severe corruption - content may need complete regeneration") # Check for encoding issues try: content.decode('utf-8') except UnicodeDecodeError: suggestions.append("Encoding issue detected - verify character encoding") return { "actual_hash": actual_hash, "expected_hash": expected_hash, "differences": sum(1 for a, b in zip(actual_hash, expected_hash) if a != b), "suggestions": suggestions, "repair_possible": len(suggestions) > 0 } # Solution 3: Progressive verification def progressive_content_verification(content: bytes, expected_hash: str) -> Dict: """Progressive verification with early detection.""" chunk_size = 1024 content_hash = hashlib.sha256() for i in range(0, len(content), chunk_size): chunk = content[i:i+chunk_size] content_hash.update(chunk) # Early detection for obvious corruption if i == 0 and len(chunk) == 0: return { "valid": False, "stage": "initial", "error": "Empty content detected" } final_hash = content_hash.hexdigest() return { "valid": final_hash == expected_hash, "actual_hash": final_hash, "expected_hash": expected_hash, "verification_stages": ["initial", "progressive", "final"] } ``` --- ## Debugging Tools and Techniques ### Comprehensive Debugging Framework ```python # debugging_tools.py class OPCATDebugger: """Comprehensive debugging tools for OP_CAT-IPFS integration.""" def __init__(self, verbose: bool = False): self.verbose = verbose self.debug_log = [] self.breakpoints = set() def debug_script_execution(self, script: Dict, input_data: Dict = None) -> Dict: """Debug script execution with step-by-step analysis.""" print("=== Script Execution Debug ===\n") operations = script["operations"] stack = [] memory = {} execution_log = [] for i, op in enumerate(operations): op_name = op["op"] op_data = op.get("data", "") print(f"Step {i+1}: 
{op_name}") if self.verbose: print(f" Data: {op_data[:20]}..." if len(op_data) > 20 else f" Data: {op_data}") # Execute operation try: result = self._execute_operation(op_name, op_data, stack, memory) stack = result["stack"] memory = result["memory"] # Log execution log_entry = { "step": i+1, "operation": op_name, "stack_before": result["stack_before"], "stack_after": stack.copy(), "memory_changes": result["memory_changes"], "success": result["success"] } execution_log.append(log_entry) if self.verbose: print(f" Stack: {stack}") print(f" Memory: {list(memory.keys())}") if not result["success"]: print(f" ❌ Error: {result.get('error', 'Unknown error')}") break print(f" ✅ Success") except Exception as e: print(f" ❌ Exception: {str(e)}") execution_log.append({ "step": i+1, "operation": op_name, "error": str(e), "success": False }) break print() return { "execution_log": execution_log, "final_stack": stack, "final_memory": memory, "success": all(log["success"] for log in execution_log) } def _execute_operation(self, op_name: str, op_data: str, stack: List, memory: Dict) -> Dict: """Execute individual operation.""" stack_before = stack.copy() memory_changes = {} try: if op_name.startswith("OP_PUSH"): # Push operation stack.append(op_data) elif op_name == "OP_CAT": # Concatenation: x1 x2 OP_CAT pushes x1||x2 (top of stack is the right-hand operand) if len(stack) < 2: return {"success": False, "error": "Stack underflow for OP_CAT"} item1 = stack.pop() item2 = stack.pop() concatenated = item2 + item1 stack.append(concatenated) elif op_name == "OP_HASH256": # Hash operation if len(stack) < 1: return {"success": False, "error": "Stack underflow for OP_HASH256"} item = stack.pop() hashed = hashlib.sha256(item.encode()).hexdigest() stack.append(hashed) elif op_name == "OP_EQUAL": # Equality check if len(stack) < 2: return {"success": False, "error": "Stack underflow for OP_EQUAL"} item1 = stack.pop() item2 = stack.pop() equal = item1 == item2 stack.append("1" if equal else "0") elif op_name == "OP_EQUALVERIFY": # Equality verification if 
len(stack) < 2: return {"success": False, "error": "Stack underflow for OP_EQUALVERIFY"} item1 = stack.pop() item2 = stack.pop() if item1 != item2: return {"success": False, "error": "EQUALVERIFY failed"} elif op_name == "OP_1": stack.append("1") elif op_name == "OP_0": stack.append("0") elif op_name == "OP_DROP": if len(stack) < 1: return {"success": False, "error": "Stack underflow for OP_DROP"} stack.pop() else: return {"success": False, "error": f"Unknown operation: {op_name}"} return { "success": True, "stack": stack, "memory": memory, "stack_before": stack_before, "memory_changes": memory_changes } except Exception as e: return { "success": False, "error": str(e), "stack": stack_before, "memory": memory, "stack_before": stack_before, "memory_changes": memory_changes } def analyze_script_performance(self, script: Dict) -> Dict: """Analyze script performance characteristics.""" operations = script["operations"] # Count operation types op_counts = {} total_gas = 0 gas_costs = { "OP_PUSHBYTES": 1, "OP_CAT": 3, "OP_HASH256": 10, "OP_EQUAL": 2, "OP_EQUALVERIFY": 2, "OP_1": 1, "OP_0": 1, "OP_DROP": 1 } for op in operations: op_name = op["op"] # Categorize operation if op_name.startswith("OP_PUSHBYTES"): category = "OP_PUSHBYTES" else: category = op_name op_counts[category] = op_counts.get(category, 0) + 1 # Calculate gas cost gas_cost = gas_costs.get(category, 5) # Default cost total_gas += gas_cost # Calculate complexity metrics complexity_score = 0 complexity_score += len(operations) * 0.1 # Operation count complexity_score += op_counts.get("OP_CAT", 0) * 0.5 # CAT operations complexity_score += max(0, len(operations) - 20) * 0.2 # Large scripts return { "operation_counts": op_counts, "total_operations": len(operations), "estimated_gas": total_gas, "complexity_score": complexity_score, "performance_rating": self._get_performance_rating(complexity_score), "optimization_suggestions": self._get_optimization_suggestions(op_counts) } def _get_performance_rating(self, 
score: float) -> str: """Get performance rating based on complexity score.""" if score <= 2: return "Excellent" elif score <= 5: return "Good" elif score <= 10: return "Fair" else: return "Poor" def _get_optimization_suggestions(self, op_counts: Dict) -> List[str]: """Get optimization suggestions based on operation counts.""" suggestions = [] if op_counts.get("OP_PUSHBYTES", 0) > 10: suggestions.append("Consider reducing number of push operations") if op_counts.get("OP_CAT", 0) > 5: suggestions.append("Use tree-based concatenation pattern") if sum(op_counts.values()) > 30: suggestions.append("Consider splitting into multiple scripts") if op_counts.get("OP_HASH256", 0) > 3: suggestions.append("Cache hash results where possible") return suggestions ``` --- ## Developer Onboarding Materials ### New Developer Quick Start ```python # developer_onboarding.py class DeveloperOnboarding: """Comprehensive onboarding for new developers.""" def __init__(self): self.onboarding_steps = [] self.progress = {} def welcome_developer(self): """Welcome message and overview.""" print("🎉 Welcome to OP_CAT-IPFS Integration Development!") print("=" * 60) print() print("This onboarding program will help you:") print("✅ Understand the core concepts") print("✅ Set up your development environment") print("✅ Learn best practices") print("✅ Build your first integration") print("✅ Debug and troubleshoot issues") print() print("Let's get started!") print() def step_1_concepts_overview(self): """Step 1: Core concepts overview.""" print("📚 Step 1: Core Concepts Overview") print("-" * 40) concepts = { "OP_CAT": "Bitcoin script operation for concatenating stack elements", "IPFS": "InterPlanetary File System - content addressing and storage", "CID": "Content Identifier - unique address for content", "Starlight": "Integration bridge between Bitcoin and IPFS", "Script": "Sequence of Bitcoin operations", "Stack": "Data structure used by Bitcoin scripts" } for concept, description in concepts.items(): 
print(f"{concept}: {description}") print() print("Key relationships:") print("• OP_CAT enables combining content hashes with IPFS CIDs") print("• IPFS provides decentralized content storage") print("• Starlight bridges Bitcoin scripts with IPFS operations") print() def step_2_environment_setup(self): """Step 2: Development environment setup.""" print("🛠️ Step 2: Environment Setup") print("-" * 35) print("Required software:") print("• Python 3.8+") print("• Git") print("• Code editor (VS Code recommended)") print() print("Python packages to install:") packages = [ "hashlib", "base64", "datetime", "typing", "json" ] for package in packages: print(f"• {package}") print() print("Installation command:") print("pip install -r requirements.txt") print() def step_3_first_integration(self): """Step 3: Build first integration.""" print("🚀 Step 3: Your First Integration") print("-" * 40) # Simple example for new developers print("Let's create your first OP_CAT-IPFS integration:") print() # Step-by-step example content = b"Hello from new developer!" print(f"1. Content: {content}") content_hash = hashlib.sha256(content).hexdigest() print(f"2. Content hash: {content_hash}") # Simple CID generation cid_hash = hashlib.sha256(content).digest() cid_base32 = base64.b32encode(cid_hash).decode('utf-8').lower() ipfs_cid = f"bafy{cid_base32[:44]}" print(f"3. IPFS CID: {ipfs_cid}") print("4. ✅ Integration completed!") print() def step_4_best_practices(self): """Step 4: Best practices introduction.""" print("📋 Step 4: Best Practices") print("-" * 30) practices = [ "Always validate input data", "Check script size limits (520 bytes)", "Monitor stack depth (max 10)", "Use multiple hash algorithms for verification", "Implement comprehensive error handling", "Log all operations for debugging", "Test with various content sizes", "Follow security guidelines" ] for i, practice in enumerate(practices, 1): print(f"{i}. 
{practice}") print() def step_5_common_patterns(self): """Step 5: Common integration patterns.""" print("🔄 Step 5: Common Patterns") print("-" * 30) patterns = { "Basic Integration": "Single content with hash and CID", "Multi-Content": "Multiple content pieces aggregated", "Chunked Content": "Large content split into chunks", "Conditional": "Dynamic content based on conditions", "Validation": "Multi-layer content verification" } for pattern, description in patterns.items(): print(f"• {pattern}: {description}") print() def step_6_debugging_basics(self): """Step 6: Debugging fundamentals.""" print("🐛 Step 6: Debugging Basics") print("-" * 30) debugging_tips = [ "Use the debugger class for step-by-step execution", "Check stack depth during script execution", "Validate all inputs before processing", "Log operations for troubleshooting", "Test with known good inputs first", "Isolate problems by testing components separately" ] for i, tip in enumerate(debugging_tips, 1): print(f"{i}. {tip}") print() def step_7_resources_and_help(self): """Step 7: Resources and getting help.""" print("📖 Step 7: Resources & Help") print("-" * 35) resources = { "Documentation": "OP_CAT_IPFS_Technical_Guide.md", "Tutorials": "OP_CAT_IPFS_Tutorials.md", "Best Practices": "OP_CAT_IPFS_Best_Practices.md", "Code Examples": "op_cat_ipfs_examples.py", "Troubleshooting": "This guide" } for resource, location in resources.items(): print(f"• {resource}: {location}") print() print("Getting help:") print("• Check the troubleshooting guide first") print("• Review code examples for patterns") print("• Use the debugger for step-by-step analysis") print("• Consult the documentation for detailed information") print() def complete_onboarding(self): """Complete onboarding process.""" print("🎊 Onboarding Complete!") print("=" * 30) print() print("You're now ready to:") print("✅ Build OP_CAT-IPFS integrations") print("✅ Debug and troubleshoot issues") print("✅ Follow best practices") print("✅ Use the reference 
implementation") print() print("Next steps:") print("1. Try the quick start example") print("2. Work through the tutorials") print("3. Build your own integration") print("4. Contribute to the project") print() print("Happy coding! 🚀") def run_complete_onboarding(self): """Run complete onboarding program.""" self.welcome_developer() self.step_1_concepts_overview() self.step_2_environment_setup() self.step_3_first_integration() self.step_4_best_practices() self.step_5_common_patterns() self.step_6_debugging_basics() self.step_7_resources_and_help() self.complete_onboarding() ``` --- ## Learning Path and Curriculum ### Structured Learning Program ```python # learning_path.py class LearningPath: """Structured learning path for OP_CAT-IPFS integration.""" def __init__(self): self.modules = self._define_learning_modules() self.learner_progress = {} def _define_learning_modules(self) -> Dict: """Define learning modules and objectives.""" return { "beginner": { "title": "Beginner - Getting Started", "duration": "2-3 days", "objectives": [ "Understand basic OP_CAT operations", "Learn IPFS content addressing", "Set up development environment", "Create first simple integration" ], "lessons": [ "Introduction to OP_CAT", "IPFS basics and CIDs", "Environment setup", "Hello World integration" ], "exercises": [ "Create basic content script", "Generate and validate IPFS CID", "Debug simple script execution" ] }, "intermediate": { "title": "Intermediate - Advanced Patterns", "duration": "1-2 weeks", "objectives": [ "Master multi-content aggregation", "Implement chunked content processing", "Build conditional content scripts", "Apply security best practices" ], "lessons": [ "Multi-content patterns", "Chunked content processing", "Conditional scripts", "Security and validation" ], "exercises": [ "Build multi-content aggregator", "Implement chunked processor", "Create conditional content system", "Add security validation" ] }, "advanced": { "title": "Advanced - Production Systems", 
"duration": "2-3 weeks", "objectives": [ "Design production-ready systems", "Optimize performance", "Implement comprehensive testing", "Build monitoring and alerting" ], "lessons": [ "Production architecture", "Performance optimization", "Testing strategies", "Monitoring and debugging" ], "exercises": [ "Build production integration", "Optimize script performance", "Create test suite", "Implement monitoring" ] }, "expert": { "title": "Expert - Innovation and Research", "duration": "Ongoing", "objectives": [ "Contribute to project development", "Research new patterns", "Optimize core protocols", "Mentor other developers" ], "lessons": [ "Core protocol contributions", "Research methodologies", "Advanced optimization", "Teaching and mentoring" ], "exercises": [ "Contribute to reference implementation", "Research new integration patterns", "Optimize core algorithms", "Create educational content" ] } } def display_learning_path(self): """Display complete learning path.""" print("🎓 OP_CAT-IPFS Integration Learning Path") print("=" * 50) print() for level, module in self.modules.items(): print(f"📚 {module['title']}") print(f"⏱️ Duration: {module['duration']}") print() print("🎯 Objectives:") for objective in module["objectives"]: print(f" • {objective}") print() print("📖 Lessons:") for lesson_number, lesson in enumerate(module["lessons"], 1): print(f" {lesson_number}. 
{lesson}") print() print("💪 Exercises:") for exercise in module["exercises"]: print(f" • {exercise}") print() print("-" * 50) print() def get_recommended_start(self, prior_experience: str) -> str: """Get recommended starting level based on experience.""" recommendations = { "none": "beginner", "bitcoin": "beginner", "ipfs": "beginner", "scripting": "intermediate", "blockchain": "intermediate", "decentralized": "intermediate", "advanced": "advanced" } return recommendations.get(prior_experience.lower(), "beginner") def track_progress(self, learner_id: str, module: str, lesson: str, completed: bool): """Track learner progress.""" if learner_id not in self.learner_progress: self.learner_progress[learner_id] = {} if module not in self.learner_progress[learner_id]: self.learner_progress[learner_id][module] = {} self.learner_progress[learner_id][module][lesson] = completed def generate_certificate(self, learner_id: str, module: str) -> Dict: """Generate completion certificate.""" if module not in self.learner_progress[learner_id]: return {"error": "No progress found for module"} module_progress = self.learner_progress[learner_id][module] total_lessons = len(self.modules[module]["lessons"]) completed_lessons = sum(1 for completed in module_progress.values() if completed) if completed_lessons == total_lessons: return { "certificate": { "learner_id": learner_id, "module": module, "title": self.modules[module]["title"], "completed_at": datetime.datetime.now().isoformat(), "lessons_completed": completed_lessons, "total_lessons": total_lessons } } else: return { "error": "Module not completed", "progress": f"{completed_lessons}/{total_lessons} lessons" } ``` --- ## Quick Reference Cards ### Essential Commands and Patterns ```python # quick_reference.py class QuickReference: """Quick reference for common OP_CAT-IPFS operations.""" def __init__(self): self.reference_cards = self._create_reference_cards() def _create_reference_cards(self) -> Dict: """Create quick reference 
cards.""" return { "basic_script": { "purpose": "Create basic OP_CAT-IPFS script", "code": """ # Basic script creation content_hash = hashlib.sha256(content).hexdigest() ipfs_cid = generate_ipfs_cid(content) script = { "version": "OP_CAT_IPFS_v1.0", "operations": [ {"op": "OP_PUSHBYTES_32", "data": content_hash}, {"op": "OP_PUSHBYTES_49", "data": ipfs_cid}, {"op": "OP_CAT"}, {"op": "OP_HASH256"}, {"op": "OP_EQUALVERIFY"}, {"op": "OP_1"} ] } """, "notes": "Suitable for content < 1MB" }, "cid_validation": { "purpose": "Validate IPFS CID format", "code": """ def validate_cid(cid: str) -> bool: if not cid.startswith("bafy"): return False if len(cid) != 49: return False valid_chars = set("abcdefghijklmnopqrstuvwxyz234567") return all(c in valid_chars for c in cid[4:]) """, "notes": "Always validate CIDs before use" }, "script_validation": { "purpose": "Validate script constraints", "code": """ def validate_script(script: Dict) -> Dict: operations = script["operations"] script_size = calculate_script_size(operations) stack_depth = simulate_stack_depth(operations) return { "valid": script_size <= 520 and stack_depth <= 10, "script_size": script_size, "stack_depth": stack_depth } """, "notes": "Check before deployment" }, "error_handling": { "purpose": "Comprehensive error handling", "code": """ try: result = integrate_content(content) if not result["success"]: handle_integration_error(result) except ValidationError as e: handle_validation_error(e) except ProcessingError as e: handle_processing_error(e) """, "notes": "Handle all error types" }, "debugging": { "purpose": "Debug script execution", "code": """ debugger = OPCATDebugger(verbose=True) debug_result = debugger.debug_script_execution(script) if not debug_result["success"]: analyze_execution_log(debug_result["execution_log"]) """, "notes": "Use for troubleshooting" } } def display_reference(self, card_name: str): """Display specific reference card.""" if card_name not in self.reference_cards: print(f"Reference card 
'{card_name}' not found") return card = self.reference_cards[card_name] print(f"📋 Reference Card: {card['purpose']}") print("=" * 50) print() print("Code:") print(card["code"]) print() print("Notes:") print(card["notes"]) print() def list_all_references(self): """List all available reference cards.""" print("📚 Available Reference Cards:") print("-" * 35) for card_name in self.reference_cards: card = self.reference_cards[card_name] print(f"• {card_name}: {card['purpose']}") print() ``` --- This comprehensive troubleshooting guide and developer onboarding materials provide everything developers need to successfully implement, debug, and maintain OP_CAT-IPFS integrations for the Starlight project.