import torch
import torch.nn as nn
from typing import Optional, Tuple
from dataclasses import dataclass


@dataclass
class AIMarker:
    """Represents an AI-generated security marker."""
    embedding: torch.Tensor
    attention_pattern: torch.Tensor
    blockchain_hash: str
    dimension_signature: torch.Tensor


class NurvSecureAIMarker(nn.Module):
    def __init__(
        self,
        hidden_dim: int = 2048,
        num_heads: int = 16,
        num_layers: int = 12,
        max_sequence_length: int = 1024,
        blockchain_dim: int = 256
    ):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.num_heads = num_heads

        # Transformer-based marker generator
        self.embedding = nn.Linear(hidden_dim, hidden_dim)
        self.position_encoding = nn.Parameter(
            torch.randn(1, max_sequence_length, hidden_dim)
        )

        # Multi-headed attention for dimensional mapping
        # (batch_first=True so inputs are (batch, seq, hidden))
        self.attention_layers = nn.ModuleList([
            nn.MultiheadAttention(hidden_dim, num_heads, batch_first=True)
            for _ in range(num_layers)
        ])

        # Projects blockchain data into the model dimension so it can be
        # mixed into the embedding without changing its shape
        self.blockchain_proj = nn.Linear(blockchain_dim, hidden_dim)

        # Dimension mapping networks
        self.dimension_mapper = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim * 2),
            nn.GELU(),
            nn.Linear(hidden_dim * 2, hidden_dim)
        )

        # Blockchain signature generator (256-dimensional signature vector)
        self.signature_generator = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.GELU(),
            nn.Linear(hidden_dim // 2, 256)
        )

    def generate_marker(
        self,
        input_data: torch.Tensor,
        blockchain_data: Optional[torch.Tensor] = None
    ) -> AIMarker:
        """Generate an AI marker with optional blockchain integration."""
        # Initial embedding plus learned positional encoding
        x = self.embedding(input_data)
        x = x + self.position_encoding[:, :x.size(1), :]

        # Apply attention layers, keeping each layer's attention weights
        attention_patterns = []
        for layer in self.attention_layers:
            attn_output, attn_pattern = layer(
                x, x, x,
                need_weights=True,
                average_attn_weights=False
            )
            x = attn_output
            attention_patterns.append(attn_pattern)

        # Generate dimensional signature
        dim_signature = self.dimension_mapper(x)

        # Mix in blockchain data before hashing. Concatenating along the
        # feature dimension would break the signature generator's input size,
        # so the data is projected to hidden_dim and added instead.
        if blockchain_data is not None:
            x = x + self.blockchain_proj(blockchain_data)

        # Generate blockchain-compatible hash
        blockchain_hash = self._generate_hash(x)

        # Combine attention patterns across layers
        combined_attention = torch.stack(attention_patterns).mean(0)

        return AIMarker(
            embedding=x,
            attention_pattern=combined_attention,
            blockchain_hash=blockchain_hash,
            dimension_signature=dim_signature
        )

    def _generate_hash(self, x: torch.Tensor) -> str:
        """Generate a hex hash string from tensor data.

        Note: this is the hex encoding of the raw signature floats, so the
        string is much longer than 256 bits.
        """
        signature = self.signature_generator(x)
        hash_bytes = signature.detach().cpu().numpy().tobytes()
        return hash_bytes.hex()

    def verify_marker(
        self,
        marker: AIMarker,
        blockchain_data: Optional[torch.Tensor] = None
    ) -> Tuple[bool, float]:
        """Verify an AI marker's authenticity.

        The marker's embedding is passed back through the network, so the
        similarity scores measure self-consistency; exact hash equality is
        only expected when the marker is regenerated from the same raw input.
        """
        # Regenerate marker from the embedded data
        new_marker = self.generate_marker(marker.embedding, blockchain_data)

        # Compare attention patterns
        attention_similarity = torch.cosine_similarity(
            marker.attention_pattern.flatten(),
            new_marker.attention_pattern.flatten(),
            dim=0
        )

        # Compare dimension signatures
        dim_similarity = torch.cosine_similarity(
            marker.dimension_signature.flatten(),
            new_marker.dimension_signature.flatten(),
            dim=0
        )

        # Verify blockchain hash
        hash_valid = marker.blockchain_hash == new_marker.blockchain_hash

        # Combined verification score
        verification_score = ((attention_similarity + dim_similarity) / 2).item()
        is_valid = hash_valid and verification_score > 0.95
        return is_valid, verification_score


# Example usage
if __name__ == "__main__":
    # Initialize the AI marker generator
    marker_generator = NurvSecureAIMarker()

    # Create sample input data: batch_size=1, seq_len=64, hidden_dim=2048
    input_data = torch.randn(1, 64, 2048)
    blockchain_data = torch.randn(1, 64, 256)  # Sample blockchain data

    # Generate marker
    marker = marker_generator.generate_marker(input_data, blockchain_data)

    # Verify marker
    is_valid, confidence = marker_generator.verify_marker(marker, blockchain_data)
    print(f"Marker verification: {'Success' if is_valid else 'Failed'}")
    print(f"Verification confidence: {confidence:.4f}")
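
# --- Hedged sketch (not part of the module above) ---------------------------
# `_generate_hash` returns the hex encoding of the raw signature floats, which
# is far longer than 256 bits (a (1, 64, 256) float32 signature is 65,536
# bytes). If a fixed-width 256-bit digest is wanted for on-chain anchoring,
# one possible approach -- an assumption, not the module's defined behaviour --
# is to run the signature bytes through SHA-256. `signature_to_digest` is a
# hypothetical helper introduced only for illustration.

import hashlib


def signature_to_digest(signature: torch.Tensor) -> str:
    """Hypothetical helper: condense a signature tensor to a 256-bit hex digest."""
    raw = signature.detach().cpu().contiguous().numpy().tobytes()
    return hashlib.sha256(raw).hexdigest()  # 64 hex characters == 256 bits


if __name__ == "__main__":
    # e.g. a fixed-width digest of the dimension signature generated above
    print(f"SHA-256 digest: {signature_to_digest(marker.dimension_signature)}")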