#!/usr/bin/env python3
"""
TimeChain 4-layer hierarchical time blockchain
Montana Protocol v3.3 NTS Anchor (36 global atomic servers)
Иерархическая матрёшка:
- τ₁ (60s): Атомарный слайс. Транзакции + presence proofs. Горизонтальная цепь.
- τ₂ (600s): Эмиссия. 10 τ₁ headers + merkle root. Coinbase транзакции.
- τ₃ (14d): Чекпоинт. 2016 τ₂ headers + merkle root. Adaptive Cooldown.
- τ₄ (4y): Халвинг. 104 τ₃ headers + merkle root. Новый коэффициент.
Двойная связность:
- Горизонтальная: prev_hash внутри каждого слоя (τ₁τ₁τ₁)
- Вертикальная: каждый верхний слайс вкладывает все заголовки нижнего
Post-quantum security: ML-DSA-65 (FIPS 204) signatures on every time window.
Security audit: GPT-5.2 R1-R6 + Gemini R1-R4 (2026-02-20) 28 vulnerabilities fixed.
NTS Anchor: 36 global NTS servers × TLS 1.3 × RFC 8915 chain recalculation impossible.
"""
import hashlib
import json
import math
import sqlite3
import time
import logging
import threading
from dataclasses import dataclass, field
from typing import List, Optional, Tuple, Dict
from node_crypto import sign_message, verify_signature
from transaction import Transaction, UTXOSet, validate_transaction
from nts_anchor import NTSAnchorService, NTSAnchorBlock, compute_anchor_hash
logger = logging.getLogger("timechain")
# ═══════════════════════════════════════════════════════════════════════════════
# PROTOCOL CONSTANTS
# ═══════════════════════════════════════════════════════════════════════════════
GENESIS_HASH = "0" * 64
# 09.01.2026 00:00:00 MSK (UTC+3) = 08.01.2026 21:00:00 UTC
GENESIS_TIMESTAMP_NS = 1_767_906_000_000_000_000
# Temporal coordinates
TAU1_SECONDS = 60 # 1 minute
TAU2_SECONDS = 600 # 10 minutes
TAU3_SECONDS = 1_209_600 # 14 days
TAU4_SECONDS = 126_144_000 # 4 years
TAU1_PER_TAU2 = 10 # 10 τ₁ = 1 τ₂
TAU2_PER_TAU3 = 2016 # 2016 τ₂ = 1 τ₃
TAU3_PER_TAU4 = 104 # 104 τ₃ = 1 τ₄
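# Consistency sketch (illustrative): the lower layers nest exactly, while
# 104 τ₃ (104 × 14 d = 1456 days) only approximates the nominal 4-year τ₄ (1460 days).
assert TAU1_SECONDS * TAU1_PER_TAU2 == TAU2_SECONDS # 60 s × 10 = 600 s
assert TAU2_SECONDS * TAU2_PER_TAU3 == TAU3_SECONDS # 600 s × 2016 = 14 days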
# TIME_BANK
TIME_BANK_TOTAL_SECONDS = 1_262_304_000 # 40 Julian years (21,038,400 minutes ≈ 21,000,000)
# Security: maximum timestamp deviation from real time (5 minutes)
MAX_TIMESTAMP_DRIFT_NS = 5 * 60 * 1_000_000_000
# Security: minimum interval between τ₁ windows (30 seconds; sybil protection)
MIN_TAU1_GAP_NS = 30 * 1_000_000_000
# ML-DSA-65 key sizes (hex)
EXPECTED_PUBKEY_HEX_LEN = 3904 # 1952 bytes = 3904 hex chars
EXPECTED_SIGNATURE_HEX_LEN = 6618 # 3309 bytes = 6618 hex chars
# DoS protection: size limits per window (GPT-5.2 fix #12)
MAX_TX_PER_TAU1 = 1000 # Max transactions per τ₁ window
MAX_PROOFS_PER_TAU1 = 100 # Max presence proof hashes per τ₁
MAX_COINBASE_PER_TAU2 = 100 # Max coinbase transactions per τ₂
# SQLite INTEGER safety (Gemini R3 fix #2): prevent int64 overflow
# SQLite stores INTEGER as signed 64-bit (max 9,223,372,036,854,775,807)
# We cap at 2^62 to leave headroom for SUM() operations
MAX_SAFE_AMOUNT = 4_611_686_018_427_387_903 # 2^62 - 1
# Chain length sanity limit (GPT-5.2 R5 fix #3): prevent OOM in verification
# ~190 years of τ₁ windows at 1 per minute
MAX_CHAIN_LENGTH = 100_000_000
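# Sanity check (illustrative): the cap is exactly 2**62 - 1, one power of two below
# SQLite's signed 64-bit maximum, so a SUM() of two capped amounts cannot overflow.
assert MAX_SAFE_AMOUNT == 2 ** 62 - 1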
# ═══════════════════════════════════════════════════════════════════════════════
# MERKLE TREE (Domain-Separated)
# ═══════════════════════════════════════════════════════════════════════════════
# Domain separation prefixes (GPT-5.2 fix #8)
MERKLE_LEAF_PREFIX = b'\x00'
MERKLE_NODE_PREFIX = b'\x01'
def _validate_hex_hash(h: str) -> bool:
"""Validate that a string is a 64-char lowercase hex hash."""
return len(h) == 64 and all(c in '0123456789abcdef' for c in h)
def _is_power_of_half(value: float) -> bool:
"""
GPT-5.2 R4 fix #2: Check if value is a power of 1/2 (exactly representable in IEEE 754).
Valid: 1.0, 0.5, 0.25, 0.125, 0.0625, ...
These are the only halving coefficients Montana Protocol uses.
"""
if value <= 0 or value > 1.0:
return False
# Multiply by 2 until we reach 1.0
v = value
for _ in range(64): # max 64 halvings
if v == 1.0:
return True
v *= 2.0
return False
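# Usage sketch (illustrative): exact powers of 1/2 pass; anything else, including
# values that merely look close (0.3), is rejected, keeping halving math
# bit-identical across platforms.
assert _is_power_of_half(1.0) and _is_power_of_half(0.5 ** 10)
assert not _is_power_of_half(0.3) and not _is_power_of_half(2.0)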
def merkle_root(hashes: List[str]) -> str:
"""
Вычисляет Merkle root (SHA-256) с domain separation.
Security (GPT-5.2 fix #8):
- Leaf prefix 0x00 / Node prefix 0x01 (prevents second preimage)
- Count leaf for protection against extension attacks
- All inputs validated as 64-char hex hashes
- Uses bytes (not strings) for concatenation
"""
if not hashes:
return hashlib.sha256(b"EMPTY_MERKLE").hexdigest()
# Validate all hashes
for h in hashes:
if not _validate_hex_hash(h):
raise ValueError(f"Invalid hash in merkle tree: {h[:20]}... (len={len(h)})")
# Domain-separated leaf hashing
level = [
hashlib.sha256(MERKLE_LEAF_PREFIX + bytes.fromhex(h)).hexdigest()
for h in hashes
]
# Count leaf for second preimage / extension protection
count_leaf = hashlib.sha256(
MERKLE_LEAF_PREFIX + f"COUNT:{len(hashes)}".encode()
).hexdigest()
level.append(count_leaf)
# Pad to power of 2
while len(level) & (len(level) - 1):
level.append(level[-1])
# Build tree with domain-separated internal nodes
while len(level) > 1:
next_level = []
for i in range(0, len(level), 2):
combined = (
MERKLE_NODE_PREFIX
+ bytes.fromhex(level[i])
+ bytes.fromhex(level[i + 1])
)
next_level.append(hashlib.sha256(combined).hexdigest())
level = next_level
return level[0]
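# Usage sketch (illustrative only, not used by the protocol): the count leaf makes
# the root commit to the leaf count, so a list and its duplicated variant produce
# different roots, and internal-node ordering makes the root order-sensitive.
def _merkle_root_example() -> None:
    """Illustrative only: demonstrates count and order sensitivity."""
    leaf = hashlib.sha256(b"example").hexdigest()
    assert merkle_root([leaf]) != merkle_root([leaf, leaf])
    other = hashlib.sha256(b"other").hexdigest()
    assert merkle_root([leaf, other]) != merkle_root([other, leaf])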
def _safe_json_loads(raw: str, default):
"""Safely parse JSON, returning default on corruption (Gemini fix #2 v2)."""
try:
return json.loads(raw)
except (json.JSONDecodeError, ValueError):
logger.warning(f"Corrupted JSON, resetting to default: {raw[:50]}...")
return default
# Domain separation prefix for accumulator (Gemini R2 fix #5)
ACCUMULATOR_PREFIX = b'MONTANA_ACCUMULATOR:'
def compute_accumulator(prev_accumulator: str, window_hash: str) -> str:
"""
Кумулятивный аккумулятор цепочки с domain separation.
accumulator(N) = SHA256(DOMAIN_PREFIX + bytes(accumulator(N-1)) + bytes(window_hash(N)))
Gemini R2 fix #5: Domain-separated to prevent concatenation ambiguity.
Свойства:
- accumulator(N) является криптографическим коммитом ко ВСЕЙ истории
от генезиса до окна N
- Изменение ЛЮБОГО исторического окна ломает аккумулятор ВСЕХ последующих
- O(1) вычисление для каждого нового окна
- Невозможно подделать без знания всей цепи
"""
combined = ACCUMULATOR_PREFIX + prev_accumulator.encode() + b':' + window_hash.encode()
return hashlib.sha256(combined).hexdigest()
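# Usage sketch (illustrative helper, not part of the protocol): folding window hashes
# from genesis reproduces the head accumulator. This is how verification replays
# history in O(N), while each new window costs only O(1).
def _fold_accumulator_example(window_hashes: List[str], start: str = GENESIS_HASH) -> str:
    """Illustrative only: recompute accumulator(N) by folding hashes from genesis."""
    acc = start
    for h in window_hashes:
        acc = compute_accumulator(acc, h)
    return acc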
# ═══════════════════════════════════════════════════════════════════════════════
# TIME WINDOW DEFINITIONS
# ═══════════════════════════════════════════════════════════════════════════════
@dataclass
class Tau1Window:
"""
τ₁ Окно времени (1 минута) атомарная единица Таймчейна.
IMMUTABILITY каждое τ₁ содержит:
- prev_tau1_hash: хеш предыдущего окна (горизонтальная цепь)
- prev_accumulator: running accumulator ПЕРЕД этим окном криптографический
коммит ко ВСЕЙ истории τ₁ от генезиса до предыдущего окна.
Изменение ЛЮБОГО окна в прошлом ломает prev_accumulator ВСЕХ последующих.
- Транзакции (UTXO transfers + coinbase)
- Хеши presence proofs
- ML-DSA-65 подпись узла-создателя
"""
timestamp: int # Nanoseconds UTC
prev_tau1_hash: str # Hash of the previous τ₁ (genesis = GENESIS_HASH)
transactions: List[Dict] # Transactions (serialized)
tx_merkle_root: str # Merkle root of the transactions
presence_proof_hashes: List[str] # Presence proof hashes
window_number: int # Sequential time-window number
node_id: str # mt... address of the creating node
prev_accumulator: str = "" # Running accumulator BEFORE this window (GPT-5.2 fix #3)
signature: str = "" # ML-DSA-65 signature of window_hash
def window_hash(self) -> str:
"""SHA-256 хеш окна времени (без подписи — подпись не влияет на хеш)"""
data = {
"node_id": self.node_id,
"presence_proof_hashes": self.presence_proof_hashes,
"prev_accumulator": self.prev_accumulator,
"prev_tau1_hash": self.prev_tau1_hash,
"timestamp": self.timestamp,
"tx_merkle_root": self.tx_merkle_root,
"window_number": self.window_number,
}
canonical = json.dumps(data, sort_keys=True, ensure_ascii=True)
return hashlib.sha256(canonical.encode()).hexdigest()
def to_dict(self) -> Dict:
return {
"timestamp": self.timestamp,
"prev_tau1_hash": self.prev_tau1_hash,
"transactions": self.transactions,
"tx_merkle_root": self.tx_merkle_root,
"presence_proof_hashes": self.presence_proof_hashes,
"window_number": self.window_number,
"node_id": self.node_id,
"prev_accumulator": self.prev_accumulator,
"signature": self.signature,
"window_hash": self.window_hash(),
}
@classmethod
def from_dict(cls, d: Dict) -> "Tau1Window":
return cls(
timestamp=d["timestamp"],
prev_tau1_hash=d["prev_tau1_hash"],
transactions=d.get("transactions", []),
tx_merkle_root=d.get("tx_merkle_root", ""),
presence_proof_hashes=d.get("presence_proof_hashes", []),
window_number=d.get("window_number", d.get("block_number", 0)),
node_id=d["node_id"],
prev_accumulator=d.get("prev_accumulator", ""),
signature=d.get("signature", ""),
)
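# Round-trip sketch (illustrative): window_hash() is computed from canonical JSON of
# the hashed fields only, so a to_dict()/from_dict() round trip preserves the hash
# and peers can recompute it instead of trusting the sender's "window_hash" field.
def _tau1_roundtrip_example(window: Tau1Window) -> bool:
    """Illustrative only: True iff serialization preserves the window hash."""
    return Tau1Window.from_dict(window.to_dict()).window_hash() == window.window_hash()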
@dataclass
class Tau2Window:
"""
τ₂ Окно времени (10 минут) слой эмиссии.
МАТРЁШКА уровень 1 содержит:
- 10 хешей τ₁ + Merkle root (вертикальная вложенность)
- tau1_accumulator: аккумулятор ВСЕЙ истории τ₁ на момент финализации
- prev_accumulator: running accumulator ВСЕЙ истории τ₂ ПЕРЕД этим окном
- Coinbase-транзакции (эмиссия Ɉ)
- Горизонтальная связь (prev_tau2_hash)
"""
timestamp: int
prev_tau2_hash: str
tau1_headers: List[str] # 10 τ₁ window hashes
tau1_merkle_root: str # Merkle root of the 10 hashes
coinbase_txs: List[Dict] # Coinbase transactions (serialized)
window_number: int
node_id: str
total_emissions: int # Total Ɉ emitted in this τ₂
halving_coefficient: float # Current coefficient
time_bank_remaining: int # Remaining TIME_BANK reserve (seconds)
tau1_accumulator: str = "" # Accumulator of the ENTIRE τ₁ history (commitment to the full τ₁ chain)
prev_accumulator: str = "" # Running τ₂ accumulator BEFORE this window (GPT-5.2 fix #3)
nts_anchor_hash: str = "" # NTS anchor: SHA-256 of 36-server attestations (v3.17.0)
signature: str = ""
def content_hash(self) -> str:
"""
Hash of window content WITHOUT NTS anchor.
This is what NTS servers attest to the pre-anchor transaction data.
"""
data = {
"halving_coefficient": self.halving_coefficient,
"node_id": self.node_id,
"prev_accumulator": self.prev_accumulator,
"prev_tau2_hash": self.prev_tau2_hash,
"tau1_accumulator": self.tau1_accumulator,
"tau1_merkle_root": self.tau1_merkle_root,
"time_bank_remaining": self.time_bank_remaining,
"timestamp": self.timestamp,
"total_emissions": self.total_emissions,
"window_number": self.window_number,
}
return hashlib.sha256(json.dumps(data, sort_keys=True).encode()).hexdigest()
def window_hash(self) -> str:
"""
Full hash including NTS anchor binding.
NTS anchor makes this hash unforgeable without 12+ global atomic server attestations.
"""
data = {
"halving_coefficient": self.halving_coefficient,
"node_id": self.node_id,
"nts_anchor_hash": self.nts_anchor_hash,
"prev_accumulator": self.prev_accumulator,
"prev_tau2_hash": self.prev_tau2_hash,
"tau1_accumulator": self.tau1_accumulator,
"tau1_merkle_root": self.tau1_merkle_root,
"time_bank_remaining": self.time_bank_remaining,
"timestamp": self.timestamp,
"total_emissions": self.total_emissions,
"window_number": self.window_number,
}
return hashlib.sha256(json.dumps(data, sort_keys=True).encode()).hexdigest()
def to_dict(self) -> Dict:
return {
"timestamp": self.timestamp,
"prev_tau2_hash": self.prev_tau2_hash,
"tau1_headers": self.tau1_headers,
"tau1_merkle_root": self.tau1_merkle_root,
"coinbase_txs": self.coinbase_txs,
"window_number": self.window_number,
"node_id": self.node_id,
"total_emissions": self.total_emissions,
"halving_coefficient": self.halving_coefficient,
"time_bank_remaining": self.time_bank_remaining,
"tau1_accumulator": self.tau1_accumulator,
"prev_accumulator": self.prev_accumulator,
"nts_anchor_hash": self.nts_anchor_hash,
"signature": self.signature,
"window_hash": self.window_hash(),
}
@classmethod
def from_dict(cls, d: Dict) -> "Tau2Window":
return cls(
timestamp=d["timestamp"],
prev_tau2_hash=d["prev_tau2_hash"],
tau1_headers=d.get("tau1_headers", []),
tau1_merkle_root=d.get("tau1_merkle_root", ""),
coinbase_txs=d.get("coinbase_txs", []),
window_number=d.get("window_number", d.get("block_number", 0)),
node_id=d["node_id"],
total_emissions=d.get("total_emissions", 0),
halving_coefficient=d.get("halving_coefficient", 1.0),
time_bank_remaining=d.get("time_bank_remaining", 0),
tau1_accumulator=d.get("tau1_accumulator", ""),
prev_accumulator=d.get("prev_accumulator", ""),
nts_anchor_hash=d.get("nts_anchor_hash", ""),
signature=d.get("signature", ""),
)
@dataclass
class Tau3Window:
"""
τ₃ Окно времени (14 дней) чекпоинт сети.
МАТРЁШКА уровень 2 содержит:
- 2016 хешей τ₂ + Merkle root (вертикальная вложенность)
- tau2_accumulator: коммит ко ВСЕЙ истории τ₂ (и через неё ко всем τ₁)
- prev_accumulator: коммит ко ВСЕЙ истории τ₃ ПЕРЕД этим окном
- Adaptive Cooldown (медиана нагрузки за 56 дней = 4×τ₃)
"""
timestamp: int
prev_tau3_hash: str
tau2_headers: List[str] # 2016 τ₂ hashes
tau2_merkle_root: str
window_number: int
node_id: str
epoch_number: int # Epoch number (26 per year)
total_emissions_epoch: int # Total Ɉ emitted this epoch
adaptive_cooldown: float # Smoothed median load
tau2_accumulator: str = "" # Accumulator of the ENTIRE τ₂ history
prev_accumulator: str = "" # Running τ₃ accumulator BEFORE this window (GPT-5.2 fix #3)
signature: str = ""
def window_hash(self) -> str:
data = {
"adaptive_cooldown": self.adaptive_cooldown,
"epoch_number": self.epoch_number,
"node_id": self.node_id,
"prev_accumulator": self.prev_accumulator,
"prev_tau3_hash": self.prev_tau3_hash,
"tau2_accumulator": self.tau2_accumulator,
"tau2_merkle_root": self.tau2_merkle_root,
"timestamp": self.timestamp,
"total_emissions_epoch": self.total_emissions_epoch,
"window_number": self.window_number,
}
return hashlib.sha256(json.dumps(data, sort_keys=True).encode()).hexdigest()
def to_dict(self) -> Dict:
return {
"timestamp": self.timestamp,
"prev_tau3_hash": self.prev_tau3_hash,
"tau2_headers": self.tau2_headers,
"tau2_merkle_root": self.tau2_merkle_root,
"window_number": self.window_number,
"node_id": self.node_id,
"epoch_number": self.epoch_number,
"total_emissions_epoch": self.total_emissions_epoch,
"adaptive_cooldown": self.adaptive_cooldown,
"tau2_accumulator": self.tau2_accumulator,
"prev_accumulator": self.prev_accumulator,
"signature": self.signature,
"window_hash": self.window_hash(),
}
@classmethod
def from_dict(cls, d: Dict) -> "Tau3Window":
return cls(
timestamp=d["timestamp"],
prev_tau3_hash=d["prev_tau3_hash"],
tau2_headers=d.get("tau2_headers", []),
tau2_merkle_root=d.get("tau2_merkle_root", ""),
window_number=d.get("window_number", d.get("block_number", 0)),
node_id=d["node_id"],
epoch_number=d.get("epoch_number", 0),
total_emissions_epoch=d.get("total_emissions_epoch", 0),
adaptive_cooldown=d.get("adaptive_cooldown", 1.0),
tau2_accumulator=d.get("tau2_accumulator", ""),
prev_accumulator=d.get("prev_accumulator", ""),
signature=d.get("signature", ""),
)
@dataclass
class Tau4Window:
"""
τ₄ Окно времени (4 года) халвинг.
МАТРЁШКА уровень 3 (верхний) содержит:
- 104 хеша τ₃ + Merkle root (вертикальная вложенность)
- tau3_accumulator: коммит ко ВСЕЙ истории τ₃ (и через неё ко всем τ₂ и τ₁)
- prev_accumulator: коммит ко ВСЕЙ истории τ₄ ПЕРЕД этим окном
- Халвинг: halving_coefficient /= 2
"""
timestamp: int
prev_tau4_hash: str
tau3_headers: List[str] # 104 τ₃ hashes
tau3_merkle_root: str
window_number: int
node_id: str
halving_number: int # Halving index (0, 1, 2, ...)
new_halving_coefficient: float # New coefficient (1.0, 0.5, 0.25, ...)
total_emissions_tau4: int # Total Ɉ emitted over this τ₄ period
tau3_accumulator: str = "" # Accumulator of the ENTIRE τ₃ history
prev_accumulator: str = "" # Running τ₄ accumulator BEFORE this window (GPT-5.2 fix #3)
signature: str = ""
def window_hash(self) -> str:
data = {
"halving_number": self.halving_number,
"new_halving_coefficient": self.new_halving_coefficient,
"node_id": self.node_id,
"prev_accumulator": self.prev_accumulator,
"prev_tau4_hash": self.prev_tau4_hash,
"tau3_accumulator": self.tau3_accumulator,
"tau3_merkle_root": self.tau3_merkle_root,
"timestamp": self.timestamp,
"total_emissions_tau4": self.total_emissions_tau4,
"window_number": self.window_number,
}
return hashlib.sha256(json.dumps(data, sort_keys=True).encode()).hexdigest()
def to_dict(self) -> Dict:
return {
"timestamp": self.timestamp,
"prev_tau4_hash": self.prev_tau4_hash,
"tau3_headers": self.tau3_headers,
"tau3_merkle_root": self.tau3_merkle_root,
"window_number": self.window_number,
"node_id": self.node_id,
"halving_number": self.halving_number,
"new_halving_coefficient": self.new_halving_coefficient,
"total_emissions_tau4": self.total_emissions_tau4,
"tau3_accumulator": self.tau3_accumulator,
"prev_accumulator": self.prev_accumulator,
"signature": self.signature,
"window_hash": self.window_hash(),
}
@classmethod
def from_dict(cls, d: Dict) -> "Tau4Window":
return cls(
timestamp=d["timestamp"],
prev_tau4_hash=d["prev_tau4_hash"],
tau3_headers=d.get("tau3_headers", []),
tau3_merkle_root=d.get("tau3_merkle_root", ""),
window_number=d.get("window_number", d.get("block_number", 0)),
node_id=d["node_id"],
halving_number=d.get("halving_number", 0),
new_halving_coefficient=d.get("new_halving_coefficient", 1.0),
total_emissions_tau4=d.get("total_emissions_tau4", 0),
tau3_accumulator=d.get("tau3_accumulator", ""),
prev_accumulator=d.get("prev_accumulator", ""),
signature=d.get("signature", ""),
)
# ═══════════════════════════════════════════════════════════════════════════════
# TIMECHAIN DATABASE
# ═══════════════════════════════════════════════════════════════════════════════
class TimeChainDB:
"""
Персистентность окон времени в SQLite.
IMMUTABILITY GUARANTEES:
- WAL mode + PRAGMA synchronous=FULL (crash protection)
- INSERT only (no REPLACE windows are immutable once written)
- Hash verification on every read (detect tampering)
- PRAGMA integrity_check on startup
"""
def __init__(self, db_path: str):
self.db_path = db_path
self._lock = threading.Lock()
self._init_db()
def _init_db(self):
with sqlite3.connect(self.db_path) as conn:
# IMMUTABILITY: WAL mode for crash protection
conn.execute("PRAGMA journal_mode=WAL")
conn.execute("PRAGMA synchronous=FULL")
# IMMUTABILITY: integrity check on startup
result = conn.execute("PRAGMA integrity_check").fetchone()
if result[0] != "ok":
raise RuntimeError(f"DATABASE CORRUPTION DETECTED: {result[0]}")
conn.execute("""
CREATE TABLE IF NOT EXISTS tau1_windows (
window_number INTEGER PRIMARY KEY,
timestamp INTEGER NOT NULL,
window_hash TEXT UNIQUE NOT NULL,
prev_hash TEXT NOT NULL,
tx_merkle_root TEXT,
data_json TEXT NOT NULL,
signature TEXT NOT NULL
)
""")
conn.execute("""
CREATE TABLE IF NOT EXISTS tau2_windows (
window_number INTEGER PRIMARY KEY,
timestamp INTEGER NOT NULL,
window_hash TEXT UNIQUE NOT NULL,
prev_hash TEXT NOT NULL,
tau1_merkle_root TEXT,
total_emissions INTEGER,
halving_coefficient REAL,
data_json TEXT NOT NULL,
signature TEXT NOT NULL
)
""")
conn.execute("""
CREATE TABLE IF NOT EXISTS tau3_windows (
window_number INTEGER PRIMARY KEY,
timestamp INTEGER NOT NULL,
window_hash TEXT UNIQUE NOT NULL,
prev_hash TEXT NOT NULL,
tau2_merkle_root TEXT,
epoch_number INTEGER,
data_json TEXT NOT NULL,
signature TEXT NOT NULL
)
""")
conn.execute("""
CREATE TABLE IF NOT EXISTS tau4_windows (
window_number INTEGER PRIMARY KEY,
timestamp INTEGER NOT NULL,
window_hash TEXT UNIQUE NOT NULL,
prev_hash TEXT NOT NULL,
tau3_merkle_root TEXT,
halving_number INTEGER,
new_halving_coefficient REAL,
data_json TEXT NOT NULL,
signature TEXT NOT NULL
)
""")
conn.execute("""
CREATE TABLE IF NOT EXISTS chain_state (
key TEXT PRIMARY KEY,
value TEXT NOT NULL
)
""")
# NTS Anchor table — cryptographic time anchoring via 36 global NTS servers
conn.execute("""
CREATE TABLE IF NOT EXISTS nts_anchors (
window_type TEXT NOT NULL,
window_number INTEGER NOT NULL,
content_hash TEXT NOT NULL,
anchor_hash TEXT NOT NULL,
server_count INTEGER NOT NULL,
region_count INTEGER NOT NULL,
timestamp_spread_ns INTEGER NOT NULL,
data_json TEXT NOT NULL,
created_at_ns INTEGER NOT NULL,
PRIMARY KEY (window_type, window_number)
)
""")
conn.commit()
def _conn(self) -> sqlite3.Connection:
conn = sqlite3.connect(self.db_path)
conn.row_factory = sqlite3.Row
return conn
# --- State ---
def get_state(self, key: str, default: str = "") -> str:
with self._conn() as conn:
row = conn.execute(
"SELECT value FROM chain_state WHERE key = ?", (key,)
).fetchone()
return row["value"] if row else default
def set_state(self, key: str, value: str):
with self._lock:
with self._conn() as conn:
conn.execute(
"INSERT OR REPLACE INTO chain_state (key, value) VALUES (?, ?)",
(key, value),
)
conn.commit()
# --- τ₁ ---
def save_tau1(self, window: Tau1Window):
"""IMMUTABLE: INSERT only — once written, cannot be overwritten"""
with self._lock:
with self._conn() as conn:
conn.execute(
"""INSERT INTO tau1_windows
(window_number, timestamp, window_hash, prev_hash, tx_merkle_root, data_json, signature)
VALUES (?, ?, ?, ?, ?, ?, ?)""",
(
window.window_number,
window.timestamp,
window.window_hash(),
window.prev_tau1_hash,
window.tx_merkle_root,
json.dumps(window.to_dict(), ensure_ascii=False),
window.signature,
),
)
conn.commit()
def get_tau1(self, window_number: int) -> Optional[Tau1Window]:
"""IMMUTABLE READ: verifies hash integrity on every read"""
with self._conn() as conn:
row = conn.execute(
"SELECT data_json, window_hash FROM tau1_windows WHERE window_number = ?",
(window_number,),
).fetchone()
if row:
window = Tau1Window.from_dict(json.loads(row["data_json"]))
# INTEGRITY CHECK: recompute hash and compare
recomputed = window.window_hash()
if row["window_hash"] != recomputed:
raise RuntimeError(
f"INTEGRITY VIOLATION: τ₁ #{window_number} hash mismatch "
f"(stored={row['window_hash'][:16]}..., computed={recomputed[:16]}...)"
)
return window
return None
def get_tau1_by_hash(self, window_hash: str) -> Optional[Tau1Window]:
"""GPT-5.2 v2 fix #4: integrity check on by_hash reads too"""
with self._conn() as conn:
row = conn.execute(
"SELECT data_json, window_hash FROM tau1_windows WHERE window_hash = ?",
(window_hash,),
).fetchone()
if row:
window = Tau1Window.from_dict(json.loads(row["data_json"]))
recomputed = window.window_hash()
if row["window_hash"] != recomputed:
raise RuntimeError(
f"INTEGRITY VIOLATION: τ₁ by_hash hash mismatch "
f"(stored={row['window_hash'][:16]}..., computed={recomputed[:16]}...)"
)
return window
return None
def get_tau1_count(self) -> int:
with self._conn() as conn:
row = conn.execute("SELECT COUNT(*) as cnt FROM tau1_windows").fetchone()
return row["cnt"]
def get_all_tau1_hashes(self) -> List[str]:
"""Все хеши τ₁ окон в порядке номера"""
with self._conn() as conn:
rows = conn.execute(
"SELECT window_hash FROM tau1_windows ORDER BY window_number"
).fetchall()
return [r["window_hash"] for r in rows]
# --- τ₂ ---
def save_tau2(self, window: Tau2Window):
"""IMMUTABLE: INSERT only — once written, cannot be overwritten"""
with self._lock:
with self._conn() as conn:
conn.execute(
"""INSERT INTO tau2_windows
(window_number, timestamp, window_hash, prev_hash,
tau1_merkle_root, total_emissions, halving_coefficient,
data_json, signature)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)""",
(
window.window_number,
window.timestamp,
window.window_hash(),
window.prev_tau2_hash,
window.tau1_merkle_root,
window.total_emissions,
window.halving_coefficient,
json.dumps(window.to_dict(), ensure_ascii=False),
window.signature,
),
)
conn.commit()
def get_tau2(self, window_number: int) -> Optional[Tau2Window]:
"""IMMUTABLE READ: verifies hash integrity on every read"""
with self._conn() as conn:
row = conn.execute(
"SELECT data_json, window_hash FROM tau2_windows WHERE window_number = ?",
(window_number,),
).fetchone()
if row:
window = Tau2Window.from_dict(json.loads(row["data_json"]))
recomputed = window.window_hash()
if row["window_hash"] != recomputed:
raise RuntimeError(
f"INTEGRITY VIOLATION: τ₂ #{window_number} hash mismatch "
f"(stored={row['window_hash'][:16]}..., computed={recomputed[:16]}...)"
)
return window
return None
def get_tau2_count(self) -> int:
with self._conn() as conn:
row = conn.execute("SELECT COUNT(*) as cnt FROM tau2_windows").fetchone()
return row["cnt"]
def get_all_tau2_hashes(self) -> List[str]:
with self._conn() as conn:
rows = conn.execute(
"SELECT window_hash FROM tau2_windows ORDER BY window_number"
).fetchall()
return [r["window_hash"] for r in rows]
# --- τ₃ ---
def save_tau3(self, window: Tau3Window):
"""IMMUTABLE: INSERT only — once written, cannot be overwritten"""
with self._lock:
with self._conn() as conn:
conn.execute(
"""INSERT INTO tau3_windows
(window_number, timestamp, window_hash, prev_hash,
tau2_merkle_root, epoch_number, data_json, signature)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)""",
(
window.window_number,
window.timestamp,
window.window_hash(),
window.prev_tau3_hash,
window.tau2_merkle_root,
window.epoch_number,
json.dumps(window.to_dict(), ensure_ascii=False),
window.signature,
),
)
conn.commit()
def get_tau3(self, window_number: int) -> Optional[Tau3Window]:
"""IMMUTABLE READ: verifies hash integrity on every read"""
with self._conn() as conn:
row = conn.execute(
"SELECT data_json, window_hash FROM tau3_windows WHERE window_number = ?",
(window_number,),
).fetchone()
if row:
window = Tau3Window.from_dict(json.loads(row["data_json"]))
recomputed = window.window_hash()
if row["window_hash"] != recomputed:
raise RuntimeError(
f"INTEGRITY VIOLATION: τ₃ #{window_number} hash mismatch"
)
return window
return None
def get_tau3_count(self) -> int:
with self._conn() as conn:
row = conn.execute("SELECT COUNT(*) as cnt FROM tau3_windows").fetchone()
return row["cnt"]
def get_all_tau3_hashes(self) -> List[str]:
with self._conn() as conn:
rows = conn.execute(
"SELECT window_hash FROM tau3_windows ORDER BY window_number"
).fetchall()
return [r["window_hash"] for r in rows]
# --- τ₄ ---
def save_tau4(self, window: Tau4Window):
"""IMMUTABLE: INSERT only — once written, cannot be overwritten"""
with self._lock:
with self._conn() as conn:
conn.execute(
"""INSERT INTO tau4_windows
(window_number, timestamp, window_hash, prev_hash,
tau3_merkle_root, halving_number, new_halving_coefficient,
data_json, signature)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)""",
(
window.window_number,
window.timestamp,
window.window_hash(),
window.prev_tau4_hash,
window.tau3_merkle_root,
window.halving_number,
window.new_halving_coefficient,
json.dumps(window.to_dict(), ensure_ascii=False),
window.signature,
),
)
conn.commit()
def get_tau4(self, window_number: int) -> Optional[Tau4Window]:
"""IMMUTABLE READ: verifies hash integrity on every read"""
with self._conn() as conn:
row = conn.execute(
"SELECT data_json, window_hash FROM tau4_windows WHERE window_number = ?",
(window_number,),
).fetchone()
if row:
window = Tau4Window.from_dict(json.loads(row["data_json"]))
recomputed = window.window_hash()
if row["window_hash"] != recomputed:
raise RuntimeError(
f"INTEGRITY VIOLATION: τ₄ #{window_number} hash mismatch"
)
return window
return None
def get_tau4_count(self) -> int:
with self._conn() as conn:
row = conn.execute("SELECT COUNT(*) as cnt FROM tau4_windows").fetchone()
return row["cnt"]
# --- NTS Anchor storage ---
def save_nts_anchor(self, window_type: str, window_number: int, anchor: NTSAnchorBlock):
"""Save NTS anchor block for a window"""
with self._lock:
with self._conn() as conn:
conn.execute(
"""INSERT OR REPLACE INTO nts_anchors
(window_type, window_number, content_hash, anchor_hash,
server_count, region_count, timestamp_spread_ns,
data_json, created_at_ns)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)""",
(
window_type, window_number, anchor.content_hash,
anchor.anchor_hash, anchor.server_count,
anchor.region_count, anchor.timestamp_spread_ns,
json.dumps(anchor.to_dict(), ensure_ascii=False),
anchor.created_at_ns,
),
)
def get_nts_anchor(self, window_type: str, window_number: int) -> Optional[NTSAnchorBlock]:
"""Load NTS anchor block for a window"""
with self._conn() as conn:
row = conn.execute(
"SELECT data_json FROM nts_anchors WHERE window_type = ? AND window_number = ?",
(window_type, window_number),
).fetchone()
if row:
return NTSAnchorBlock.from_dict(json.loads(row["data_json"]))
return None
def get_nts_anchor_count(self, window_type: str = "tau2") -> int:
"""Count NTS anchors for a window type"""
with self._conn() as conn:
row = conn.execute(
"SELECT COUNT(*) as cnt FROM nts_anchors WHERE window_type = ?",
(window_type,),
).fetchone()
return row["cnt"]
# --- Recent windows queries ---
def get_recent_tau1(self, limit: int = 20) -> List[Tau1Window]:
"""Последние τ₁ окна (newest first)"""
with self._conn() as conn:
rows = conn.execute(
"SELECT data_json FROM tau1_windows ORDER BY window_number DESC LIMIT ?",
(limit,),
).fetchall()
return [Tau1Window.from_dict(json.loads(r["data_json"])) for r in rows]
def get_recent_tau2(self, limit: int = 20) -> List[Tau2Window]:
"""Последние τ₂ окна (newest first)"""
with self._conn() as conn:
rows = conn.execute(
"SELECT data_json FROM tau2_windows ORDER BY window_number DESC LIMIT ?",
(limit,),
).fetchall()
return [Tau2Window.from_dict(json.loads(r["data_json"])) for r in rows]
def get_recent_tau3(self, limit: int = 10) -> List[Tau3Window]:
"""Последние τ₃ окна (newest first)"""
with self._conn() as conn:
rows = conn.execute(
"SELECT data_json FROM tau3_windows ORDER BY window_number DESC LIMIT ?",
(limit,),
).fetchall()
return [Tau3Window.from_dict(json.loads(r["data_json"])) for r in rows]
def get_recent_tau4(self, limit: int = 5) -> List[Tau4Window]:
"""Последние τ₄ окна (newest first)"""
with self._conn() as conn:
rows = conn.execute(
"SELECT data_json FROM tau4_windows ORDER BY window_number DESC LIMIT ?",
(limit,),
).fetchall()
return [Tau4Window.from_dict(json.loads(r["data_json"])) for r in rows]
# ═══════════════════════════════════════════════════════════════════════════════
# TIMECHAIN — MAIN CLASS
# ═══════════════════════════════════════════════════════════════════════════════
class TimeChain:
"""
TimeChain 4-слойная иерархическая цепочка слайсов времени.
Каждый верхний слой вкладывает в себя все заголовки нижних матрёшка.
"""
def __init__(self, node_id: str, private_key: str, db_path: str, public_key: str = "",
strict_timestamps: bool = True, nts_service: Optional[NTSAnchorService] = None):
"""
Args:
strict_timestamps: If True, enforce timestamp validation (drift, gap).
Set to False for unit tests that create windows rapidly.
nts_service: NTS anchor service for τ₂ time anchoring.
If None, NTS anchoring is disabled (nts_anchor_hash stays empty).
"""
self.node_id = node_id
self.private_key = private_key
self.public_key = public_key
self.strict_timestamps = strict_timestamps
self.nts_service = nts_service
self.db = TimeChainDB(db_path)
self.utxo_set = UTXOSet(db_path)
# Node registry: node_id (mt...) → public_key (hex)
self.node_registry: Dict[str, str] = {}
if public_key and node_id:
self.node_registry[node_id] = public_key
# Load state from the DB, or initialize genesis
self.last_tau1_hash = self.db.get_state("last_tau1_hash", GENESIS_HASH)
self.last_tau2_hash = self.db.get_state("last_tau2_hash", GENESIS_HASH)
self.last_tau3_hash = self.db.get_state("last_tau3_hash", GENESIS_HASH)
self.last_tau4_hash = self.db.get_state("last_tau4_hash", GENESIS_HASH)
self.tau1_count = self.db.get_tau1_count()
self.tau2_count = self.db.get_tau2_count()
self.tau3_count = self.db.get_tau3_count()
self.tau4_count = self.db.get_tau4_count()
# Pending headers for aggregation (persisted across restarts)
# Uses module-level _safe_json_loads (Gemini R2 fix #2)
self.pending_tau1_headers: List[str] = _safe_json_loads(
self.db.get_state("pending_tau1_headers", "[]"), []
)
self.pending_tau2_headers: List[str] = _safe_json_loads(
self.db.get_state("pending_tau2_headers", "[]"), []
)
self.pending_tau3_headers: List[str] = _safe_json_loads(
self.db.get_state("pending_tau3_headers", "[]"), []
)
# Emission tracking
self.pending_tau2_emissions: int = int(self.db.get_state("pending_tau2_emissions", "0"))
self.pending_tau3_emissions: int = int(self.db.get_state("pending_tau3_emissions", "0"))
self.pending_tau4_emissions: int = int(self.db.get_state("pending_tau4_emissions", "0"))
self.time_bank_spent: int = int(self.db.get_state("time_bank_spent", "0"))
# Chain accumulators: a running cryptographic commitment to the ENTIRE history
# accumulator(N) = SHA256(PREFIX + accumulator(N-1) + ':' + window_hash(N))
self.tau1_accumulator = self.db.get_state("tau1_accumulator", GENESIS_HASH)
self.tau2_accumulator = self.db.get_state("tau2_accumulator", GENESIS_HASH)
self.tau3_accumulator = self.db.get_state("tau3_accumulator", GENESIS_HASH)
self.tau4_accumulator = self.db.get_state("tau4_accumulator", GENESIS_HASH)
# Startup verification: check chain_state matches actual DB (GPT-5.2 fix #5)
if self.tau1_count > 0:
self._verify_chain_state_on_startup()
def _verify_chain_state_on_startup(self):
"""
Verify chain_state matches actual chain data on startup.
Prevents head substitution / state corruption attacks.
GPT-5.2 v3 fix #9: Expanded to verify τ₂/τ₃/τ₄ heads too (not just τ₁).
"""
# Verify last τ₁ hash matches actual last window
last_window = self.db.get_tau1(self.tau1_count - 1)
if last_window is None:
raise RuntimeError(
f"CHAIN STATE CORRUPTION: τ₁ count={self.tau1_count} "
f"but window #{self.tau1_count - 1} not found"
)
actual_last_hash = last_window.window_hash()
if self.last_tau1_hash != actual_last_hash:
raise RuntimeError(
f"CHAIN STATE CORRUPTION: last_tau1_hash mismatch "
f"(state={self.last_tau1_hash[:16]}..., actual={actual_last_hash[:16]}...)"
)
# Verify genesis exists and is correct
genesis = self.db.get_tau1(0)
if genesis is None:
raise RuntimeError("CHAIN STATE CORRUPTION: genesis window not found")
if genesis.prev_tau1_hash != GENESIS_HASH:
raise RuntimeError("CHAIN STATE CORRUPTION: genesis prev_hash != GENESIS_HASH")
# GPT-5.2 v3 fix #9: Verify τ₂ head
if self.tau2_count > 0:
last_t2 = self.db.get_tau2(self.tau2_count - 1)
if last_t2 is None:
raise RuntimeError(f"CHAIN STATE CORRUPTION: τ₂ count={self.tau2_count} but last not found")
if self.last_tau2_hash != last_t2.window_hash():
raise RuntimeError("CHAIN STATE CORRUPTION: last_tau2_hash mismatch")
# GPT-5.2 v3 fix #9: Verify τ₃ head
if self.tau3_count > 0:
last_t3 = self.db.get_tau3(self.tau3_count - 1)
if last_t3 is None:
raise RuntimeError(f"CHAIN STATE CORRUPTION: τ₃ count={self.tau3_count} but last not found")
if self.last_tau3_hash != last_t3.window_hash():
raise RuntimeError("CHAIN STATE CORRUPTION: last_tau3_hash mismatch")
# GPT-5.2 v3 fix #9: Verify τ₄ head
if self.tau4_count > 0:
last_t4 = self.db.get_tau4(self.tau4_count - 1)
if last_t4 is None:
raise RuntimeError(f"CHAIN STATE CORRUPTION: τ₄ count={self.tau4_count} but last not found")
if self.last_tau4_hash != last_t4.window_hash():
raise RuntimeError("CHAIN STATE CORRUPTION: last_tau4_hash mismatch")
# ─── Node Registry ─────────────────────────────────────────────────────
def register_node(self, node_id: str, public_key: str):
"""
Зарегистрировать узел для верификации подписей окон времени.
Gemini fix #4: Validate public key format before registration.
"""
if not isinstance(public_key, str) or \
len(public_key) != EXPECTED_PUBKEY_HEX_LEN or \
not all(c in '0123456789abcdef' for c in public_key):
raise ValueError(
f"Invalid public key for node {node_id[:16]}...: "
f"expected {EXPECTED_PUBKEY_HEX_LEN} hex chars, got {len(public_key)}"
)
self.node_registry[node_id] = public_key
# ─── Persistence ────────────────────────────────────────────────────────
def _persist_pending(self):
"""Сохранить pending headers и emissions в БД (защита от crash)"""
self.db.set_state("pending_tau1_headers", json.dumps(self.pending_tau1_headers))
self.db.set_state("pending_tau2_headers", json.dumps(self.pending_tau2_headers))
self.db.set_state("pending_tau3_headers", json.dumps(self.pending_tau3_headers))
self.db.set_state("pending_tau2_emissions", str(self.pending_tau2_emissions))
self.db.set_state("pending_tau3_emissions", str(self.pending_tau3_emissions))
self.db.set_state("pending_tau4_emissions", str(self.pending_tau4_emissions))
# ─── Signing ──────────────────────────────────────────────────────────────
def _sign(self, message: str) -> str:
"""Подписать сообщение ML-DSA-65"""
return sign_message(self.private_key, message)
# ─── Timestamp Validation (GPT-5.2 fix #4) ──────────────────────────────
def _validate_timestamp(self, timestamp_ns: int, is_genesis: bool = False):
"""
GPT-5.2 fix #4: Enforce timestamp constraints.
- Must not be from the future (beyond MAX_TIMESTAMP_DRIFT_NS)
- Must be strictly after previous window
- Must have minimum gap (MIN_TAU1_GAP_NS)
"""
if is_genesis:
return # Genesis has fixed timestamp
if not self.strict_timestamps:
return # Skip validation in test mode
now_ns = time.time_ns()
# Future check
if timestamp_ns > now_ns + MAX_TIMESTAMP_DRIFT_NS:
raise ValueError(
f"Timestamp from future: {timestamp_ns} > now + drift "
f"({now_ns + MAX_TIMESTAMP_DRIFT_NS})"
)
# Gemini fix #2: Past drift check (prevent timestamps far in the past)
if timestamp_ns < now_ns - MAX_TIMESTAMP_DRIFT_NS:
raise ValueError(
f"Timestamp too far in past: {timestamp_ns} < now - drift "
f"({now_ns - MAX_TIMESTAMP_DRIFT_NS})"
)
# Must be after previous window
if self.tau1_count > 0:
prev_window = self.db.get_tau1(self.tau1_count - 1)
if prev_window and timestamp_ns <= prev_window.timestamp:
raise ValueError(
f"Timestamp not monotonic: {timestamp_ns} <= prev {prev_window.timestamp}"
)
# Minimum gap (anti-sybil)
if prev_window and (timestamp_ns - prev_window.timestamp) < MIN_TAU1_GAP_NS:
gap_s = (timestamp_ns - prev_window.timestamp) / 1e9
raise ValueError(
f"Timestamp gap too small: {gap_s:.1f}s < {MIN_TAU1_GAP_NS / 1e9:.0f}s"
)
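# Worked example (illustrative): with MAX_TIMESTAMP_DRIFT_NS = 5 min and
# MIN_TAU1_GAP_NS = 30 s, for now = time.time_ns() and prev = previous window:
#   _validate_timestamp(now + 120 * 10**9)              # OK: within drift
#   _validate_timestamp(now + 360 * 10**9)              # ValueError: from the future
#   _validate_timestamp(prev.timestamp + 10 * 10**9)    # ValueError: gap < 30 s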
# ─── Genesis ──────────────────────────────────────────────────────────────
def create_genesis_window(self) -> Tau1Window:
"""
Создаёт генезис-окно (τ₁ #0).
Нулевое окно. prev_hash = 64 нуля.
Нет транзакций. Метка: Montana Genesis 09.01.2026.
GPT-5.2 fix #3/#14: Consistent accumulator.
prev_accumulator = GENESIS_HASH (no history before genesis).
After genesis: tau1_accumulator = SHA256(GENESIS_HASH + genesis_window_hash).
"""
if self.tau1_count > 0:
raise RuntimeError("Genesis window already exists")
window = Tau1Window(
timestamp=GENESIS_TIMESTAMP_NS,
prev_tau1_hash=GENESIS_HASH,
transactions=[],
tx_merkle_root=merkle_root([]),
presence_proof_hashes=[],
window_number=0,
node_id=self.node_id,
prev_accumulator=GENESIS_HASH, # No history before genesis
)
# Sign
window.signature = self._sign(window.window_hash())
# Save
self._save_tau1(window)
# Update accumulator: SHA256(GENESIS_HASH + genesis_window_hash)
self.tau1_accumulator = compute_accumulator(GENESIS_HASH, window.window_hash())
self.db.set_state("tau1_accumulator", self.tau1_accumulator)
logger.info(f"Genesis time window created: {window.window_hash()[:16]}...")
return window
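# Bootstrap sketch (illustrative; key material and paths are placeholders):
#   chain = TimeChain(node_id="mt...", private_key="<ML-DSA-65 secret>",
#                     db_path="timechain.db", strict_timestamps=False)
#   chain.create_genesis_window()      # τ₁ #0, prev_hash = 64 zeros
#   chain.create_tau1_window([])       # τ₁ #1 chains onto genesis
# Once ten τ₁ headers are pending, finalize_tau2() can aggregate them into
# an emission window.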
# ─── τ₁ Window Creation ────────────────────────────────────────────────────
def create_tau1_window(
self,
transactions: List[Transaction],
presence_proof_hashes: Optional[List[str]] = None,
) -> Tau1Window:
"""
Create a new τ₁ time window with transactions.
GPT-5.2 fixes applied:
- #2: Atomic UTXO + window save (single SQLite transaction)
- #3: Correct accumulator (prev_accumulator = running accumulator)
- #4: Timestamp validation (drift, monotonicity, gap)
- #12: DoS protection (size limits)
"""
if presence_proof_hashes is None:
presence_proof_hashes = []
# DoS protection (GPT-5.2 fix #12)
if len(transactions) > MAX_TX_PER_TAU1:
raise ValueError(f"Too many transactions: {len(transactions)} > {MAX_TX_PER_TAU1}")
if len(presence_proof_hashes) > MAX_PROOFS_PER_TAU1:
raise ValueError(f"Too many proofs: {len(presence_proof_hashes)} > {MAX_PROOFS_PER_TAU1}")
# GPT-5.2 R6 fix #3: Duplicate TX check (DoS protection)
seen_tx_hashes = set()
for tx in transactions:
if tx.tx_hash in seen_tx_hashes:
raise ValueError(f"Duplicate transaction in window: {tx.tx_hash[:16]}...")
seen_tx_hashes.add(tx.tx_hash)
# Validate every transaction
for tx in transactions:
valid, err = validate_transaction(tx, self.utxo_set)
if not valid:
raise ValueError(f"Invalid transaction {tx.tx_hash[:16]}...: {err}")
# Serialize transactions
tx_dicts = [tx.to_dict() for tx in transactions]
tx_hashes = [tx.tx_hash for tx in transactions]
tx_root = merkle_root(tx_hashes) # merkle_root() handles the empty list itself
# Timestamp with validation (GPT-5.2 fix #4)
ts = time.time_ns()
self._validate_timestamp(ts)
# Accumulator: store running accumulator BEFORE this window (GPT-5.2 fix #3)
window = Tau1Window(
timestamp=ts,
prev_tau1_hash=self.last_tau1_hash,
transactions=tx_dicts,
tx_merkle_root=tx_root,
presence_proof_hashes=presence_proof_hashes,
window_number=self.tau1_count,
node_id=self.node_id,
prev_accumulator=self.tau1_accumulator,
)
# Sign
window.signature = self._sign(window.window_hash())
# ATOMIC: apply transactions + save window in ONE SQLite transaction (GPT-5.2 fix #2)
# GPT-5.2 R4 fix #1: Convert RuntimeError to ValueError for clean TOCTOU handling
try:
self._save_tau1_atomic(window, transactions)
except RuntimeError as e:
raise ValueError(f"Atomic save failed (possible TOCTOU race): {e}") from e
return window
def _save_tau1(self, window: Tau1Window):
"""
Save τ₁ window (for genesis no UTXO operations needed).
"""
whash = window.window_hash()
new_pending = self.pending_tau1_headers + [whash]
with self.db._lock:
conn = sqlite3.connect(self.db.db_path)
try:
conn.execute("BEGIN IMMEDIATE")
conn.execute(
"""INSERT INTO tau1_windows
(window_number, timestamp, window_hash, prev_hash, tx_merkle_root, data_json, signature)
VALUES (?, ?, ?, ?, ?, ?, ?)""",
(
window.window_number,
window.timestamp,
whash,
window.prev_tau1_hash,
window.tx_merkle_root,
json.dumps(window.to_dict(), ensure_ascii=False),
window.signature,
),
)
conn.execute(
"INSERT OR REPLACE INTO chain_state (key, value) VALUES (?, ?)",
("last_tau1_hash", whash),
)
conn.execute(
"INSERT OR REPLACE INTO chain_state (key, value) VALUES (?, ?)",
("pending_tau1_headers", json.dumps(new_pending)),
)
conn.commit()
# Gemini fix #1: In-memory updates INSIDE lock
self.last_tau1_hash = whash
self.tau1_count = window.window_number + 1
self.pending_tau1_headers = new_pending
except Exception:
conn.rollback()
raise
finally:
conn.close()
def _save_tau1_atomic(self, window: Tau1Window, transactions: List[Transaction]):
"""
GPT-5.2 fix #2: ATOMIC save τ₁ window + UTXO operations.
All UTXO changes (spend inputs, add outputs) and window save
happen in a SINGLE SQLite transaction. If anything fails,
everything rolls back no partial state.
"""
whash = window.window_hash()
new_pending = self.pending_tau1_headers + [whash]
with self.db._lock:
conn = sqlite3.connect(self.db.db_path)
try:
conn.execute("BEGIN IMMEDIATE")
# 1. Apply UTXO changes for each transaction
for tx in transactions:
if tx.tx_type == "transfer":
# Spend inputs
for inp in tx.inputs:
cursor = conn.execute(
"""UPDATE utxos SET spent_by_tx = ?
WHERE tx_hash = ? AND output_idx = ? AND spent_by_tx IS NULL""",
(tx.tx_hash, inp.tx_hash, inp.output_idx),
)
if cursor.rowcount != 1:
raise RuntimeError(
f"Failed to spend UTXO {inp.tx_hash[:16]}...:{inp.output_idx}"
)
# Add outputs (both coinbase and transfer)
# GPT-5.2 v3 fix #5/#6: Hard error on duplicate (no INSERT OR IGNORE)
for idx, out in enumerate(tx.outputs):
cursor = conn.execute(
"""INSERT INTO utxos
(tx_hash, output_idx, address, amount, created_in_tau1, spent_by_tx)
VALUES (?, ?, ?, ?, ?, NULL)""",
(tx.tx_hash, idx, out.address, out.amount, window.window_number),
)
if cursor.rowcount != 1:
raise RuntimeError(
f"Failed to insert UTXO output {tx.tx_hash[:16]}...:{idx}"
)
# 2. Save window
conn.execute(
"""INSERT INTO tau1_windows
(window_number, timestamp, window_hash, prev_hash, tx_merkle_root, data_json, signature)
VALUES (?, ?, ?, ?, ?, ?, ?)""",
(
window.window_number,
window.timestamp,
whash,
window.prev_tau1_hash,
window.tx_merkle_root,
json.dumps(window.to_dict(), ensure_ascii=False),
window.signature,
),
)
# 3. Update chain state
conn.execute(
"INSERT OR REPLACE INTO chain_state (key, value) VALUES (?, ?)",
("last_tau1_hash", whash),
)
conn.execute(
"INSERT OR REPLACE INTO chain_state (key, value) VALUES (?, ?)",
("pending_tau1_headers", json.dumps(new_pending)),
)
# 4. Update accumulator in same transaction
new_acc = compute_accumulator(self.tau1_accumulator, whash)
conn.execute(
"INSERT OR REPLACE INTO chain_state (key, value) VALUES (?, ?)",
("tau1_accumulator", new_acc),
)
conn.commit()
# Gemini fix #1: In-memory updates INSIDE lock (race-condition safe)
self.last_tau1_hash = whash
self.tau1_count = window.window_number + 1
self.pending_tau1_headers = new_pending
self.tau1_accumulator = new_acc # same value as persisted above
except Exception:
conn.rollback()
raise
finally:
conn.close()
# ─── τ₂ Window (Emission) ─────────────────────────────────────────────────
def finalize_tau2(
self,
coinbase_txs: List[Transaction],
halving_coefficient: float,
) -> Optional[Tau2Window]:
"""
τ₂ finalization: creates the emission time window.
GPT-5.2 fixes applied:
- #3: Correct accumulator (prev_accumulator)
- #9: Coefficient validation (NaN/inf/range) + total emission cap
- #10: Atomic UTXO + window save
- #12: DoS protection (max coinbase count)
"""
if len(self.pending_tau1_headers) < TAU1_PER_TAU2:
return None
# GPT-5.2 fix #9 + R4 fix #2: Validate halving_coefficient
# Must be power of 1/2 (exactly representable in IEEE 754) for cross-platform consistency
if not isinstance(halving_coefficient, (int, float)):
raise ValueError(f"halving_coefficient must be numeric: {halving_coefficient}")
if math.isnan(halving_coefficient) or math.isinf(halving_coefficient):
raise ValueError(f"halving_coefficient is NaN/Inf: {halving_coefficient}")
if halving_coefficient <= 0 or halving_coefficient > 1.0:
raise ValueError(
f"halving_coefficient out of range (0, 1.0]: {halving_coefficient}"
)
# GPT-5.2 R4 fix #2: Enforce power-of-2 denominator (1.0, 0.5, 0.25, 0.125, ...)
if not _is_power_of_half(halving_coefficient):
raise ValueError(
f"halving_coefficient must be a power of 1/2 (1.0, 0.5, 0.25, ...): {halving_coefficient}"
)
# GPT-5.2 fix #12: DoS protection
if len(coinbase_txs) > MAX_COINBASE_PER_TAU2:
raise ValueError(f"Too many coinbase txs: {len(coinbase_txs)} > {MAX_COINBASE_PER_TAU2}")
# TIME_BANK underflow protection
time_bank_remaining = TIME_BANK_TOTAL_SECONDS - self.time_bank_spent
if time_bank_remaining <= 0:
raise ValueError(
f"TIME_BANK exhausted: spent={self.time_bank_spent}s, "
f"total={TIME_BANK_TOTAL_SECONDS}s — Oracle Mode required"
)
tau1_headers = self.pending_tau1_headers[:TAU1_PER_TAU2]
tau1_root = merkle_root(tau1_headers)
# Validate coinbase transactions
total_emissions = 0
coinbase_dicts = []
for tx in coinbase_txs:
if tx.tx_type != "coinbase":
raise ValueError(f"Expected coinbase tx, got {tx.tx_type}")
if tx.inputs:
raise ValueError("Coinbase tx must have no inputs")
for out in tx.outputs:
if out.amount <= 0:
raise ValueError(f"Coinbase output amount must be positive: {out.amount}")
total_emissions += sum(out.amount for out in tx.outputs)
coinbase_dicts.append(tx.to_dict())
# GPT-5.2 fix #9 + Gemini fix #3: Total emission cap per τ₂
# max(1, ...) prevents emission halt when halving_coefficient is very small
max_total_emission = max(1, int(TAU2_SECONDS * halving_coefficient * len(coinbase_txs)))
if total_emissions > max_total_emission:
raise ValueError(
f"Total emissions overflow: {total_emissions}Ɉ > max {max_total_emission}Ɉ "
f"(TAU2={TAU2_SECONDS}s × coef={halving_coefficient} × {len(coinbase_txs)} participants)"
)
# GPT-5.2 R4 fix #3 + Gemini R2 fix #6: Per-ADDRESS emission cap
# Aggregates across ALL coinbase TXs to the same address
max_per_participant = max(1, int(TAU2_SECONDS * halving_coefficient))
address_emissions: Dict[str, int] = {}
for tx in coinbase_txs:
for out in tx.outputs:
if out.amount > max_per_participant:
raise ValueError(
f"Emission overflow for {out.address[:16]}...: "
f"{out.amount}Ɉ > max {max_per_participant}Ɉ per participant "
f"(TAU2={TAU2_SECONDS}s × coef={halving_coefficient})"
)
address_emissions[out.address] = address_emissions.get(out.address, 0) + out.amount
for addr, total in address_emissions.items():
if total > max_per_participant:
raise ValueError(
f"Per-address emission overflow: {addr[:16]}... got {total}Ɉ > "
f"max {max_per_participant}Ɉ per participant"
)
# TIME_BANK expenditure
new_time_bank_spent = self.time_bank_spent + TAU2_SECONDS
time_bank_remaining = TIME_BANK_TOTAL_SECONDS - new_time_bank_spent
# Accumulator: store running value BEFORE this window (GPT-5.2 fix #3)
window = Tau2Window(
timestamp=time.time_ns(),
prev_tau2_hash=self.last_tau2_hash,
tau1_headers=tau1_headers,
tau1_merkle_root=tau1_root,
coinbase_txs=coinbase_dicts,
window_number=self.tau2_count,
node_id=self.node_id,
total_emissions=total_emissions,
halving_coefficient=halving_coefficient,
time_bank_remaining=time_bank_remaining,
tau1_accumulator=self.tau1_accumulator,
prev_accumulator=self.tau2_accumulator,
)
# ═══ NTS ANCHOR — bind τ₂ to global atomic time (36 servers) ═══
nts_anchor = None
if self.nts_service:
c_hash = window.content_hash()
nts_anchor = self.nts_service.collect_anchor(
c_hash,
window_number=window.window_number,
prev_hash=window.prev_tau2_hash,
)
if nts_anchor:
window.nts_anchor_hash = nts_anchor.anchor_hash
logger.info(
f"τ₂ #{window.window_number}: NTS anchor ✓ "
f"{nts_anchor.server_count} servers, "
f"{nts_anchor.region_count} regions"
)
else:
logger.warning(
f"τ₂ #{window.window_number}: NTS anchor FAILED — "
f"not enough server responses"
)
window.signature = self._sign(window.window_hash())
whash = window.window_hash()
# GPT-5.2 fix #10: ATOMIC save (τ₂ + UTXO + state in one transaction)
new_pending_tau1 = self.pending_tau1_headers[TAU1_PER_TAU2:]
new_pending_tau2 = self.pending_tau2_headers + [whash]
with self.db._lock:
conn = sqlite3.connect(self.db.db_path)
try:
conn.execute("BEGIN IMMEDIATE")
# 1. Add coinbase outputs to UTXO
# GPT-5.2 v3 fix #5: Hard error on duplicate (no INSERT OR IGNORE)
for tx in coinbase_txs:
for idx, out in enumerate(tx.outputs):
cursor = conn.execute(
"""INSERT INTO utxos
(tx_hash, output_idx, address, amount, created_in_tau1, spent_by_tx)
VALUES (?, ?, ?, ?, ?, NULL)""",
(tx.tx_hash, idx, out.address, out.amount, self.tau1_count - 1),
)
if cursor.rowcount != 1:
raise RuntimeError(
f"Failed to insert coinbase UTXO {tx.tx_hash[:16]}...:{idx}"
)
# 2. Save τ₂ window
conn.execute(
"""INSERT INTO tau2_windows
(window_number, timestamp, window_hash, prev_hash,
tau1_merkle_root, total_emissions, halving_coefficient,
data_json, signature)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)""",
(
window.window_number,
window.timestamp,
whash,
window.prev_tau2_hash,
window.tau1_merkle_root,
window.total_emissions,
window.halving_coefficient,
json.dumps(window.to_dict(), ensure_ascii=False),
window.signature,
),
)
# 3. Update ALL state atomically
new_acc = compute_accumulator(self.tau2_accumulator, whash)
for k, v in [
("last_tau2_hash", whash),
("time_bank_spent", str(new_time_bank_spent)),
("tau2_accumulator", new_acc),
("pending_tau1_headers", json.dumps(new_pending_tau1)),
("pending_tau2_headers", json.dumps(new_pending_tau2)),
("pending_tau2_emissions", str(self.pending_tau2_emissions + total_emissions)),
]:
conn.execute(
"INSERT OR REPLACE INTO chain_state (key, value) VALUES (?, ?)",
(k, v),
)
# 4. Save NTS anchor INSIDE atomic transaction (no orphaned anchors)
if nts_anchor:
conn.execute(
"""INSERT OR REPLACE INTO nts_anchors
(window_type, window_number, content_hash, anchor_hash,
server_count, region_count, timestamp_spread_ns,
data_json, created_at_ns)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)""",
(
"tau2", window.window_number, nts_anchor.content_hash,
nts_anchor.anchor_hash, nts_anchor.server_count,
nts_anchor.region_count, nts_anchor.timestamp_spread_ns,
json.dumps(nts_anchor.to_dict(), ensure_ascii=False),
nts_anchor.created_at_ns,
),
)
conn.commit()
# Gemini fix #1: In-memory updates INSIDE lock
self.last_tau2_hash = whash
self.tau2_count = window.window_number + 1
self.time_bank_spent = new_time_bank_spent
self.pending_tau1_headers = new_pending_tau1
self.pending_tau2_headers = new_pending_tau2
self.pending_tau2_emissions += total_emissions
self.tau2_accumulator = new_acc # same value as persisted above
except Exception:
conn.rollback()
raise
finally:
conn.close()
logger.info(
f"τ₂ #{window.window_number}: {whash[:16]}... "
f"emissions={total_emissions}Ɉ halving={halving_coefficient}"
f"{' NTS✓' if nts_anchor else ''}"
)
return window
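# Worked example of the emission caps (illustrative): with halving_coefficient = 0.5,
#   per-participant cap = max(1, int(600 * 0.5)) = 300 Ɉ per address per τ₂,
#   window-wide cap     = max(1, int(600 * 0.5 * N)) for N coinbase transactions,
# and max(1, ...) keeps emission alive even after many halvings shrink the coefficient.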
# ─── τ₃ Window (Checkpoint) ────────────────────────────────────────────────
def finalize_tau3(self, epoch_number: int, adaptive_cooldown: float = 1.0) -> Optional[Tau3Window]:
"""
Финализация τ₃: создаёт чекпоинт каждые 14 дней.
Требует 2016 накопленных τ₂ headers.
GPT-5.2 v2 fix #5: ATOMIC save (window + state in one SQLite transaction).
"""
if len(self.pending_tau2_headers) < TAU2_PER_TAU3:
return None
tau2_headers = self.pending_tau2_headers[:TAU2_PER_TAU3]
tau2_root = merkle_root(tau2_headers)
window = Tau3Window(
timestamp=time.time_ns(),
prev_tau3_hash=self.last_tau3_hash,
tau2_headers=tau2_headers,
tau2_merkle_root=tau2_root,
window_number=self.tau3_count,
node_id=self.node_id,
epoch_number=epoch_number,
total_emissions_epoch=self.pending_tau2_emissions,
adaptive_cooldown=adaptive_cooldown,
tau2_accumulator=self.tau2_accumulator,
prev_accumulator=self.tau3_accumulator,
)
window.signature = self._sign(window.window_hash())
whash = window.window_hash()
# GPT-5.2 v2 fix #5: ATOMIC save
new_pending_tau2 = self.pending_tau2_headers[TAU2_PER_TAU3:]
new_pending_tau3 = self.pending_tau3_headers + [whash]
new_tau3_emissions = self.pending_tau3_emissions + self.pending_tau2_emissions
with self.db._lock:
conn = sqlite3.connect(self.db.db_path)
try:
conn.execute("BEGIN IMMEDIATE")
conn.execute(
"""INSERT INTO tau3_windows
(window_number, timestamp, window_hash, prev_hash,
tau2_merkle_root, epoch_number, data_json, signature)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)""",
(
window.window_number, window.timestamp, whash,
window.prev_tau3_hash, window.tau2_merkle_root,
window.epoch_number,
json.dumps(window.to_dict(), ensure_ascii=False),
window.signature,
),
)
new_acc = compute_accumulator(self.tau3_accumulator, whash)
for k, v in [
("last_tau3_hash", whash),
("tau3_accumulator", new_acc),
("pending_tau2_headers", json.dumps(new_pending_tau2)),
("pending_tau3_headers", json.dumps(new_pending_tau3)),
("pending_tau2_emissions", "0"),
("pending_tau3_emissions", str(new_tau3_emissions)),
]:
conn.execute(
"INSERT OR REPLACE INTO chain_state (key, value) VALUES (?, ?)", (k, v),
)
conn.commit()
# Gemini fix #1: In-memory updates INSIDE lock
self.last_tau3_hash = whash
self.tau3_count = window.window_number + 1
self.pending_tau2_headers = new_pending_tau2
self.pending_tau3_headers = new_pending_tau3
self.pending_tau3_emissions = new_tau3_emissions
self.pending_tau2_emissions = 0
self.tau3_accumulator = new_acc # same value as persisted above
except Exception:
conn.rollback()
raise
finally:
conn.close()
logger.info(f"τ₃ #{window.window_number}: {whash[:16]}... epoch={epoch_number}")
return window
# ─── τ₄ Window (Halving) ──────────────────────────────────────────────────
def finalize_tau4(self, halving_number: int, new_coefficient: float) -> Optional[Tau4Window]:
"""
Финализация τ₄: халвинг каждые 4 года.
Требует 104 накопленных τ₃ headers.
GPT-5.2 v2 fix #5: ATOMIC save (window + state in one SQLite transaction).
"""
if len(self.pending_tau3_headers) < TAU3_PER_TAU4:
return None
# GPT-5.2 fix #9 + R4 fix #2: Validate new_coefficient
if not isinstance(new_coefficient, (int, float)):
raise ValueError(f"new_coefficient must be numeric: {new_coefficient}")
if math.isnan(new_coefficient) or math.isinf(new_coefficient):
raise ValueError(f"new_coefficient is NaN/Inf: {new_coefficient}")
if new_coefficient <= 0 or new_coefficient > 1.0:
raise ValueError(f"new_coefficient out of range (0, 1.0]: {new_coefficient}")
if not _is_power_of_half(new_coefficient):
raise ValueError(
f"new_coefficient must be a power of 1/2 (1.0, 0.5, 0.25, ...): {new_coefficient}"
)
tau3_headers = self.pending_tau3_headers[:TAU3_PER_TAU4]
tau3_root = merkle_root(tau3_headers)
window = Tau4Window(
timestamp=time.time_ns(),
prev_tau4_hash=self.last_tau4_hash,
tau3_headers=tau3_headers,
tau3_merkle_root=tau3_root,
window_number=self.tau4_count,
node_id=self.node_id,
halving_number=halving_number,
new_halving_coefficient=new_coefficient,
total_emissions_tau4=self.pending_tau3_emissions,
tau3_accumulator=self.tau3_accumulator,
prev_accumulator=self.tau4_accumulator,
)
window.signature = self._sign(window.window_hash())
whash = window.window_hash()
# GPT-5.2 v2 fix #5: ATOMIC save
new_pending_tau3 = self.pending_tau3_headers[TAU3_PER_TAU4:]
new_tau4_emissions = self.pending_tau4_emissions + self.pending_tau3_emissions
with self.db._lock:
conn = sqlite3.connect(self.db.db_path)
try:
conn.execute("BEGIN IMMEDIATE")
conn.execute(
"""INSERT INTO tau4_windows
(window_number, timestamp, window_hash, prev_hash,
tau3_merkle_root, halving_number, new_halving_coefficient,
data_json, signature)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)""",
(
window.window_number, window.timestamp, whash,
window.prev_tau4_hash, window.tau3_merkle_root,
window.halving_number, window.new_halving_coefficient,
json.dumps(window.to_dict(), ensure_ascii=False),
window.signature,
),
)
new_acc = compute_accumulator(self.tau4_accumulator, whash)
for k, v in [
("last_tau4_hash", whash),
("tau4_accumulator", new_acc),
("pending_tau3_headers", json.dumps(new_pending_tau3)),
("pending_tau3_emissions", "0"),
("pending_tau4_emissions", str(new_tau4_emissions)),
]:
conn.execute(
"INSERT OR REPLACE INTO chain_state (key, value) VALUES (?, ?)", (k, v),
)
conn.commit()
# Gemini fix #1: In-memory updates INSIDE lock
self.last_tau4_hash = whash
self.tau4_count = window.window_number + 1
self.pending_tau3_headers = new_pending_tau3
self.pending_tau4_emissions = new_tau4_emissions
self.pending_tau3_emissions = 0
self.tau4_accumulator = new_acc  # same value committed to chain_state above
except Exception:
conn.rollback()
raise
finally:
conn.close()
logger.info(
f"τ₄ #{window.window_number}: {whash[:16]}... "
f"halving={halving_number} coef={new_coefficient}"
)
return window
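# Emission accounting cascades one layer up at each finalization (a sketch of
# the invariant maintained by the two methods above):
#     finalize_tau3(): pending_tau3_emissions += pending_tau2_emissions; pending_tau2_emissions = 0
#     finalize_tau4(): pending_tau4_emissions += pending_tau3_emissions; pending_tau3_emissions = 0
# so total_emissions_tau4 on a τ₄ window equals the combined emissions of the
# 104 τ₃ windows it seals.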
# ═══════════════════════════════════════════════════════════════════════════
# VERIFICATION
# ═══════════════════════════════════════════════════════════════════════════
def verify_tau1_chain(self) -> Tuple[bool, str]:
"""
Check 1: horizontal τ₁ chain integrity + accumulator.
GPT-5.2 fix #3: verify prev_accumulator == running accumulator
(simpler and more correct than the old chain_accumulator check).
"""
count = self.db.get_tau1_count()
if count == 0:
return True, "Empty chain"
# GPT-5.2 R5 fix #3: Chain length sanity check (OOM protection)
if count > MAX_CHAIN_LENGTH:
return False, f"Chain too long: {count} > {MAX_CHAIN_LENGTH}"
prev_hash = GENESIS_HASH
prev_timestamp = 0
running_accumulator = GENESIS_HASH
for i in range(count):
window = self.db.get_tau1(i)
if window is None:
return False, f"τ₁ #{i}: time window not found"
if window.window_number != i:
return False, f"τ₁ #{i}: window_number mismatch (expected {i}, got {window.window_number})"
if window.prev_tau1_hash != prev_hash:
return False, (
f"τ₁ #{i}: prev_hash mismatch "
f"(expected {prev_hash[:16]}..., got {window.prev_tau1_hash[:16]}...)"
)
# GPT-5.2 fix #3: Verify prev_accumulator matches running accumulator
if window.prev_accumulator:
if window.prev_accumulator != running_accumulator:
return False, (
f"τ₁ #{i}: prev_accumulator mismatch "
f"(expected {running_accumulator[:16]}..., got {window.prev_accumulator[:16]}...)"
)
if window.timestamp <= prev_timestamp and i > 0:
return False, f"τ₁ #{i}: timestamp not increasing ({window.timestamp} <= {prev_timestamp})"
# Future timestamp check (enforced for every non-genesis window)
if i > 0:
now_ns = time.time_ns()
if window.timestamp > now_ns + MAX_TIMESTAMP_DRIFT_NS:
return False, f"τ₁ #{i}: timestamp from future ({window.timestamp})"
# Gemini R2 fix #4: Inline signature verification (consistent with τ₂/τ₃/τ₄)
if not window.signature:
return False, f"τ₁ #{i}: missing signature (signatures are mandatory)"
pubkey = self.node_registry.get(window.node_id)
if pubkey:
if not verify_signature(pubkey, window.window_hash(), window.signature):
return False, f"τ₁ #{i}: INVALID ML-DSA-65 signature"
elif self.node_registry:
return False, f"τ₁ #{i}: unknown node {window.node_id[:16]}..."
whash = window.window_hash()
running_accumulator = compute_accumulator(running_accumulator, whash)
prev_hash = whash
prev_timestamp = window.timestamp
return True, f"OK ({count} τ₁ windows verified, sigs + accumulator intact)"
def verify_tau2_chain(self) -> Tuple[bool, str]:
"""
Horizontal τ₂ chain integrity + accumulator + signatures.
GPT-5.2 v3 fixes:
- #1/#7: Signatures MANDATORY (fail if missing)
- #8: Future timestamp check on τ₂
"""
count = self.db.get_tau2_count()
if count == 0:
return True, "Empty τ₂ chain"
prev_hash = GENESIS_HASH
prev_timestamp = 0
running_accumulator = GENESIS_HASH
for i in range(count):
window = self.db.get_tau2(i)
if window is None:
return False, f"τ₂ #{i}: window not found"
if window.window_number != i:
return False, f"τ₂ #{i}: window_number mismatch (expected {i}, got {window.window_number})"
if window.prev_tau2_hash != prev_hash:
return False, (
f"τ₂ #{i}: prev_hash mismatch "
f"(expected {prev_hash[:16]}..., got {window.prev_tau2_hash[:16]}...)"
)
if window.prev_accumulator and window.prev_accumulator != running_accumulator:
return False, (
f"τ₂ #{i}: prev_accumulator mismatch "
f"(expected {running_accumulator[:16]}..., got {window.prev_accumulator[:16]}...)"
)
if window.timestamp <= prev_timestamp and i > 0:
return False, f"τ₂ #{i}: timestamp not increasing"
# GPT-5.2 v3 fix #8: Future timestamp check
now_ns = time.time_ns()
if window.timestamp > now_ns + MAX_TIMESTAMP_DRIFT_NS:
return False, f"τ₂ #{i}: timestamp from future"
# GPT-5.2 v3 fix #1/#7: Signature MANDATORY
if not window.signature:
return False, f"τ₂ #{i}: missing signature (signatures are mandatory)"
pubkey = self.node_registry.get(window.node_id)
if pubkey:
if not verify_signature(pubkey, window.window_hash(), window.signature):
return False, f"τ₂ #{i}: INVALID ML-DSA-65 signature"
elif self.node_registry:
return False, f"τ₂ #{i}: unknown node {window.node_id[:16]}..."
whash = window.window_hash()
running_accumulator = compute_accumulator(running_accumulator, whash)
prev_hash = whash
prev_timestamp = window.timestamp
return True, f"OK ({count} τ₂ windows verified, sigs + accumulator intact)"
def verify_tau3_chain(self) -> Tuple[bool, str]:
"""
Horizontal τ₃ chain integrity + accumulator + signatures.
GPT-5.2 v3 fixes: mandatory signatures, future timestamp check.
"""
count = self.db.get_tau3_count()
if count == 0:
return True, "Empty τ₃ chain"
prev_hash = GENESIS_HASH
prev_timestamp = 0
running_accumulator = GENESIS_HASH
for i in range(count):
window = self.db.get_tau3(i)
if window is None:
return False, f"τ₃ #{i}: window not found"
if window.window_number != i:
return False, f"τ₃ #{i}: window_number mismatch"
if window.prev_tau3_hash != prev_hash:
return False, f"τ₃ #{i}: prev_hash mismatch"
if window.prev_accumulator and window.prev_accumulator != running_accumulator:
return False, f"τ₃ #{i}: prev_accumulator mismatch"
if window.timestamp <= prev_timestamp and i > 0:
return False, f"τ₃ #{i}: timestamp not increasing"
# Future timestamp check
now_ns = time.time_ns()
if window.timestamp > now_ns + MAX_TIMESTAMP_DRIFT_NS:
return False, f"τ₃ #{i}: timestamp from future"
# Signature MANDATORY
if not window.signature:
return False, f"τ₃ #{i}: missing signature (signatures are mandatory)"
pubkey = self.node_registry.get(window.node_id)
if pubkey:
if not verify_signature(pubkey, window.window_hash(), window.signature):
return False, f"τ₃ #{i}: INVALID ML-DSA-65 signature"
elif self.node_registry:
return False, f"τ₃ #{i}: unknown node {window.node_id[:16]}..."
whash = window.window_hash()
running_accumulator = compute_accumulator(running_accumulator, whash)
prev_hash = whash
prev_timestamp = window.timestamp
return True, f"OK ({count} τ₃ windows verified, sigs + accumulator intact)"
def verify_tau4_chain(self) -> Tuple[bool, str]:
"""
Horizontal τ₄ chain integrity + accumulator + signatures.
GPT-5.2 v3 fixes: mandatory signatures, future timestamp check.
"""
count = self.db.get_tau4_count()
if count == 0:
return True, "Empty τ₄ chain"
prev_hash = GENESIS_HASH
prev_timestamp = 0
running_accumulator = GENESIS_HASH
for i in range(count):
window = self.db.get_tau4(i)
if window is None:
return False, f"τ₄ #{i}: window not found"
if window.window_number != i:
return False, f"τ₄ #{i}: window_number mismatch"
if window.prev_tau4_hash != prev_hash:
return False, f"τ₄ #{i}: prev_hash mismatch"
if window.prev_accumulator and window.prev_accumulator != running_accumulator:
return False, f"τ₄ #{i}: prev_accumulator mismatch"
if window.timestamp <= prev_timestamp and i > 0:
return False, f"τ₄ #{i}: timestamp not increasing"
# Future timestamp check
now_ns = time.time_ns()
if window.timestamp > now_ns + MAX_TIMESTAMP_DRIFT_NS:
return False, f"τ₄ #{i}: timestamp from future"
# Signature MANDATORY
if not window.signature:
return False, f"τ₄ #{i}: missing signature (signatures are mandatory)"
pubkey = self.node_registry.get(window.node_id)
if pubkey:
if not verify_signature(pubkey, window.window_hash(), window.signature):
return False, f"τ₄ #{i}: INVALID ML-DSA-65 signature"
elif self.node_registry:
return False, f"τ₄ #{i}: unknown node {window.node_id[:16]}..."
whash = window.window_hash()
running_accumulator = compute_accumulator(running_accumulator, whash)
prev_hash = whash
prev_timestamp = window.timestamp
return True, f"OK ({count} τ₄ windows verified, sigs + accumulator intact)"
def verify_tau2_matryoshka(self, window_number: int) -> Tuple[bool, str]:
"""
Vertical τ₂ matryoshka check.
GPT-5.2 v3 fix #2: Verify contiguity + ordering of τ₁ headers.
"""
window = self.db.get_tau2(window_number)
if window is None:
return False, f"τ₂ #{window_number}: not found"
# Validate header count
if len(window.tau1_headers) != TAU1_PER_TAU2:
return False, (
f"τ₂ #{window_number}: expected {TAU1_PER_TAU2} τ₁ headers, "
f"got {len(window.tau1_headers)}"
)
# Merkle root
expected_root = merkle_root(window.tau1_headers)
if window.tau1_merkle_root != expected_root:
return False, (
f"τ₂ #{window_number}: merkle root mismatch "
f"(stored={window.tau1_merkle_root[:16]}..., computed={expected_root[:16]}...)"
)
# GPT-5.2 v3 fix #2: Existence + contiguity + ordering
prev_tau1_wnum = None
for i, tau1_hash in enumerate(window.tau1_headers):
# Validate hex format
if not _validate_hex_hash(tau1_hash):
return False, f"τ₂ #{window_number}: τ₁ header #{i} invalid hex"
tau1 = self.db.get_tau1_by_hash(tau1_hash)
if tau1 is None:
return False, f"τ₂ #{window_number}: τ₁ header #{i} not found ({tau1_hash[:16]}...)"
# Contiguity: window numbers must be sequential
if prev_tau1_wnum is not None:
if tau1.window_number != prev_tau1_wnum + 1:
return False, (
f"τ₂ #{window_number}: τ₁ headers not contiguous "
f"(#{i}: wnum {tau1.window_number} != expected {prev_tau1_wnum + 1})"
)
prev_tau1_wnum = tau1.window_number
return True, f"OK (τ₂ #{window_number}: {TAU1_PER_TAU2} τ₁ headers contiguous + merkle verified)"
def verify_tau3_matryoshka(self, window_number: int) -> Tuple[bool, str]:
"""
Vertical τ₃ matryoshka check.
Gemini R2 fix #1: Header count validation + O(1) hash-set lookup (not O(N) scan).
"""
window = self.db.get_tau3(window_number)
if window is None:
return False, f"τ₃ #{window_number}: not found"
# Gemini R2 fix #1: Validate header count (DoS protection)
if len(window.tau2_headers) != TAU2_PER_TAU3:
return False, (
f"τ₃ #{window_number}: expected {TAU2_PER_TAU3} τ₂ headers, "
f"got {len(window.tau2_headers)}"
)
expected_root = merkle_root(window.tau2_headers)
if window.tau2_merkle_root != expected_root:
return False, (
f"τ₃ #{window_number}: merkle root mismatch "
f"(stored={window.tau2_merkle_root[:16]}..., computed={expected_root[:16]}...)"
)
# GPT-5.2 R4 fix #5: O(1) lookup via hash set (not O(N) linear scan)
known_tau2_hashes = set(self.db.get_all_tau2_hashes())
for i, tau2_hash in enumerate(window.tau2_headers):
if not _validate_hex_hash(tau2_hash):
return False, f"τ₃ #{window_number}: τ₂ header #{i} invalid hex"
if tau2_hash not in known_tau2_hashes:
return False, f"τ₃ #{window_number}: τ₂ header #{i} not found ({tau2_hash[:16]}...)"
return True, f"OK (τ₃ #{window_number}: {TAU2_PER_TAU3} τ₂ headers exist + merkle verified)"
def verify_tau4_matryoshka(self, window_number: int) -> Tuple[bool, str]:
"""
Vertical τ₄ matryoshka check.
Gemini R2 fix #1: Header count validation + O(1) hash-set lookup (not O(N) scan).
"""
window = self.db.get_tau4(window_number)
if window is None:
return False, f"τ₄ #{window_number}: not found"
# Gemini R2 fix #1: Validate header count (DoS protection)
if len(window.tau3_headers) != TAU3_PER_TAU4:
return False, (
f"τ₄ #{window_number}: expected {TAU3_PER_TAU4} τ₃ headers, "
f"got {len(window.tau3_headers)}"
)
expected_root = merkle_root(window.tau3_headers)
if window.tau3_merkle_root != expected_root:
return False, (
f"τ₄ #{window_number}: merkle root mismatch "
f"(stored={window.tau3_merkle_root[:16]}..., computed={expected_root[:16]}...)"
)
# GPT-5.2 R4 fix #5: O(1) lookup via hash set (not O(N) linear scan)
known_tau3_hashes = set(self.db.get_all_tau3_hashes())
for i, tau3_hash in enumerate(window.tau3_headers):
if not _validate_hex_hash(tau3_hash):
return False, f"τ₄ #{window_number}: τ₃ header #{i} invalid hex"
if tau3_hash not in known_tau3_hashes:
return False, f"τ₄ #{window_number}: τ₃ header #{i} not found ({tau3_hash[:16]}...)"
return True, f"OK (τ₄ #{window_number}: {TAU3_PER_TAU4} τ₃ headers exist + merkle verified)"
def verify_window_signatures(self) -> Tuple[bool, str]:
"""
Verify ML-DSA-65 signatures on all τ₁ time windows.
GPT-5.2 fix #1: Strict — if node_registry exists, ALL signatures must verify.
Unknown nodes cause failure (not silent skip).
"""
count = self.db.get_tau1_count()
verified_count = 0
for i in range(count):
window = self.db.get_tau1(i)
if window is None:
return False, f"τ₁ #{i}: time window not found"
if not window.signature:
return False, f"τ₁ #{i}: missing signature"
if len(window.signature) != EXPECTED_SIGNATURE_HEX_LEN:
return False, (
f"τ₁ #{i}: invalid signature length "
f"(expected {EXPECTED_SIGNATURE_HEX_LEN}, got {len(window.signature)})"
)
pubkey = self.node_registry.get(window.node_id)
if pubkey:
whash = window.window_hash()
if not verify_signature(pubkey, whash, window.signature):
return False, f"τ₁ #{i}: INVALID ML-DSA-65 signature (node={window.node_id[:16]}...)"
verified_count += 1
elif self.node_registry:
# GPT-5.2 fix #1: Strict — unknown node is a failure when registry exists
return False, (
f"τ₁ #{i}: unknown node {window.node_id[:16]}... "
f"(not in registry of {len(self.node_registry)} nodes)"
)
if verified_count == count and count > 0:
return True, f"OK ({count} τ₁ signatures VERIFIED with ML-DSA-65)"
elif not self.node_registry:
return True, f"OK ({count} τ₁ signatures present, no registry for full verification)"
else:
return True, f"OK ({verified_count}/{count} τ₁ signatures verified)"
def verify_nts_anchors(self) -> Tuple[bool, str]:
"""
Verify NTS anchors for all τ₂ windows.
For each τ₂ window that has an NTS anchor:
1. Load anchor from DB
2. Recompute content_hash from window data
3. Verify anchor binds to correct content_hash
4. Verify anchor_hash matches window.nts_anchor_hash
5. Verify attestation requirements (12+ servers, 3+ regions)
"""
if not self.nts_service:
return True, "NTS verification skipped (no service configured)"
anchored = 0
for i in range(self.tau2_count):
window = self.db.get_tau2(i)
if window is None:
continue
# Skip windows without NTS anchor (pre-NTS or test windows)
if not window.nts_anchor_hash:
continue
anchor = self.db.get_nts_anchor("tau2", i)
if anchor is None:
return False, f"τ₂ #{i}: has nts_anchor_hash but no anchor data in DB"
# Verify content_hash binding
expected_content_hash = window.content_hash()
if anchor.content_hash != expected_content_hash:
return False, (
f"τ₂ #{i}: NTS content_hash mismatch "
f"(anchor={anchor.content_hash[:16]}..., "
f"window={expected_content_hash[:16]}...)"
)
# Verify anchor_hash matches window field
if anchor.anchor_hash != window.nts_anchor_hash:
return False, (
f"τ₂ #{i}: NTS anchor_hash mismatch "
f"(stored={window.nts_anchor_hash[:16]}..., "
f"anchor={anchor.anchor_hash[:16]}...)"
)
# Verify attestation integrity
ok, msg = self.nts_service.verify_anchor(anchor)
if not ok:
return False, f"τ₂ #{i}: NTS anchor verification failed: {msg}"
anchored += 1
return True, f"OK ({anchored} τ₂ windows NTS-anchored)"
def verify_full_chain(self) -> Tuple[bool, str]:
"""
Full TimeChain verification from genesis.
Runs ALL checks (GPT-5.2 v2 expanded):
1. Horizontal τ₁ integrity (prev_hash + accumulator)
2. Horizontal τ₂/τ₃/τ₄ integrity (GPT-5.2 v2 fix #3)
3. Vertical matryoshka for each τ₂/τ₃/τ₄
4. τ₁ time window signatures
5. UTXO supply invariant
6. NTS anchors (if NTS service configured)
Note: τ₂/τ₃/τ₄ signatures are verified inside verify_tau{2,3,4}_chain().
"""
errors = []
# 1. Horizontal τ₁ chain (prev_hash + accumulator + timestamps)
ok, msg = self.verify_tau1_chain()
if not ok:
errors.append(f"Horizontal τ₁: {msg}")
# 2. Horizontal τ₂/τ₃/τ₄ chains (GPT-5.2 v2 fix #3 + fix #1 signatures)
ok, msg = self.verify_tau2_chain()
if not ok:
errors.append(f"Horizontal τ₂: {msg}")
ok, msg = self.verify_tau3_chain()
if not ok:
errors.append(f"Horizontal τ₃: {msg}")
ok, msg = self.verify_tau4_chain()
if not ok:
errors.append(f"Horizontal τ₄: {msg}")
# 3. Vertical matryoshka (merkle roots)
for i in range(self.tau2_count):
ok, msg = self.verify_tau2_matryoshka(i)
if not ok:
errors.append(f"Vertical τ₂: {msg}")
for i in range(self.tau3_count):
ok, msg = self.verify_tau3_matryoshka(i)
if not ok:
errors.append(f"Vertical τ₃: {msg}")
for i in range(self.tau4_count):
ok, msg = self.verify_tau4_matryoshka(i)
if not ok:
errors.append(f"Vertical τ₄: {msg}")
# 4. τ₁ signatures (dedicated method with detailed checks)
ok, msg = self.verify_window_signatures()
if not ok:
errors.append(f"Signatures τ₁: {msg}")
# 5. UTXO supply invariant
ok, msg = self.utxo_set.verify_supply_invariant()
if not ok:
errors.append(f"UTXO supply: {msg}")
# 6. NTS anchors (cryptographic time anchoring)
ok, msg = self.verify_nts_anchors()
if not ok:
errors.append(f"NTS anchors: {msg}")
if errors:
return False, "; ".join(errors)
nts_count = self.db.get_nts_anchor_count("tau2")
return True, (
f"OK (τ₁={self.tau1_count}, τ₂={self.tau2_count}, "
f"τ₃={self.tau3_count}, τ₄={self.tau4_count}, "
f"UTXO supply={self.utxo_set.total_unspent()}Ɉ, "
f"NTS anchors={nts_count})"
)
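# Usage sketch (hypothetical wiring; a node might run the full audit at
# startup and then on a periodic timer):
#
#     ok, msg = chain.verify_full_chain()
#     if ok:
#         logger.info(f"TimeChain audit passed: {msg}")
#     else:
#         logger.critical(f"TimeChain audit FAILED: {msg}")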
# ═══════════════════════════════════════════════════════════════════════════
# BALANCE & STATS
# ═══════════════════════════════════════════════════════════════════════════
def get_balance(self, address: str) -> Dict:
return {
"confirmed": self.utxo_set.get_balance(address),
"utxo_count": len(self.utxo_set.get_utxos_for_address(address)),
"total_supply": self.utxo_set.total_unspent(),
}
def refresh_from_db(self):
"""
Refresh cached counts from DB (for API processes reading another node's data).
GPT-5.2 R5 fix #4: All reads in a SINGLE connection/transaction for consistency.
Prevents reading mixed old/new state during concurrent writes.
"""
with self.db._conn() as conn:
# All reads in one snapshot (WAL mode guarantees read isolation within connection)
self.tau1_count = conn.execute("SELECT COUNT(*) as cnt FROM tau1_windows").fetchone()["cnt"]
self.tau2_count = conn.execute("SELECT COUNT(*) as cnt FROM tau2_windows").fetchone()["cnt"]
self.tau3_count = conn.execute("SELECT COUNT(*) as cnt FROM tau3_windows").fetchone()["cnt"]
self.tau4_count = conn.execute("SELECT COUNT(*) as cnt FROM tau4_windows").fetchone()["cnt"]
def _get(key, default=""):
row = conn.execute("SELECT value FROM chain_state WHERE key = ?", (key,)).fetchone()
return row["value"] if row else default
self.last_tau1_hash = _get("last_tau1_hash", GENESIS_HASH)
self.last_tau2_hash = _get("last_tau2_hash", GENESIS_HASH)
self.time_bank_spent = int(_get("time_bank_spent", "0"))
self.tau1_accumulator = _get("tau1_accumulator", GENESIS_HASH)
self.tau2_accumulator = _get("tau2_accumulator", GENESIS_HASH)
self.tau3_accumulator = _get("tau3_accumulator", GENESIS_HASH)
self.tau4_accumulator = _get("tau4_accumulator", GENESIS_HASH)
# Gemini R2 fix #2: Use _safe_json_loads (crash protection)
self.pending_tau1_headers = _safe_json_loads(_get("pending_tau1_headers", "[]"), [])
self.pending_tau2_headers = _safe_json_loads(_get("pending_tau2_headers", "[]"), [])
self.pending_tau3_headers = _safe_json_loads(_get("pending_tau3_headers", "[]"), [])
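# Design note: reading everything through one connection yields a single WAL
# snapshot, so the four counters, the chain_state keys, and the pending
# header lists all describe the same instant even if another process commits
# mid-refresh.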
def get_stats(self) -> Dict:
"""Статистика Таймчейна (live from DB)"""
self.refresh_from_db()
return {
"tau1_count": self.tau1_count,
"tau2_count": self.tau2_count,
"tau3_count": self.tau3_count,
"tau4_count": self.tau4_count,
"total_supply": self.utxo_set.total_unspent(),
"utxo_count": self.utxo_set.utxo_count(),
"time_bank_spent": self.time_bank_spent,
"time_bank_remaining": TIME_BANK_TOTAL_SECONDS - self.time_bank_spent,
"last_tau1_hash": self.last_tau1_hash[:16] + "...",
"last_tau2_hash": self.last_tau2_hash[:16] + "...",
"genesis_timestamp": GENESIS_TIMESTAMP_NS,
"pending_tau1": len(self.pending_tau1_headers),
"pending_tau2": len(self.pending_tau2_headers),
"pending_tau3": len(self.pending_tau3_headers),
}
# ─── Query methods for API ────────────────────────────────────────────
def get_recent_tau1_windows(self, limit: int = 20) -> List[Dict]:
windows = self.db.get_recent_tau1(limit)
return [w.to_dict() for w in windows]
def get_recent_tau2_windows(self, limit: int = 20) -> List[Dict]:
windows = self.db.get_recent_tau2(limit)
return [w.to_dict() for w in windows]
def get_recent_tau3_windows(self, limit: int = 10) -> List[Dict]:
windows = self.db.get_recent_tau3(limit)
return [w.to_dict() for w in windows]
def get_recent_tau4_windows(self, limit: int = 5) -> List[Dict]:
windows = self.db.get_recent_tau4(limit)
return [w.to_dict() for w in windows]
def get_all_balances(self) -> List[Dict]:
balances = self.utxo_set.all_balances()
result = []
for addr, balance in sorted(balances.items(), key=lambda x: -x[1]):
if balance > 0:
result.append({
"address": addr,
"balance": balance,
"utxo_count": len(self.utxo_set.get_utxos_for_address(addr)),
})
return result
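# Usage sketch for the query layer (handler code is hypothetical):
#     stats  = chain.get_stats()                        # refreshes from DB first
#     recent = chain.get_recent_tau2_windows(limit=20)
#     top10  = chain.get_all_balances()[:10]            # sorted by balance, descending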